SlowGuess commited on
Commit
b806ed0
·
verified ·
1 Parent(s): 2c0074f

Add Batch 76316c23-d7ba-41b2-8503-b1e162b94c58

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. abductionrulestrainingtransformerstoexplainunexpectedinputs/093dc7d3-24a5-47b1-9aab-afd431172fcc_content_list.json +3 -0
  2. abductionrulestrainingtransformerstoexplainunexpectedinputs/093dc7d3-24a5-47b1-9aab-afd431172fcc_model.json +3 -0
  3. abductionrulestrainingtransformerstoexplainunexpectedinputs/093dc7d3-24a5-47b1-9aab-afd431172fcc_origin.pdf +3 -0
  4. abductionrulestrainingtransformerstoexplainunexpectedinputs/full.md +318 -0
  5. abductionrulestrainingtransformerstoexplainunexpectedinputs/images.zip +3 -0
  6. abductionrulestrainingtransformerstoexplainunexpectedinputs/layout.json +3 -0
  7. addressingresourceandprivacyconstraintsinsemanticparsingthroughdataaugmentation/135f70eb-7ce0-4fca-9ddb-ba80e70d3dc4_content_list.json +3 -0
  8. addressingresourceandprivacyconstraintsinsemanticparsingthroughdataaugmentation/135f70eb-7ce0-4fca-9ddb-ba80e70d3dc4_model.json +3 -0
  9. addressingresourceandprivacyconstraintsinsemanticparsingthroughdataaugmentation/135f70eb-7ce0-4fca-9ddb-ba80e70d3dc4_origin.pdf +3 -0
  10. addressingresourceandprivacyconstraintsinsemanticparsingthroughdataaugmentation/full.md +263 -0
  11. addressingresourceandprivacyconstraintsinsemanticparsingthroughdataaugmentation/images.zip +3 -0
  12. addressingresourceandprivacyconstraintsinsemanticparsingthroughdataaugmentation/layout.json +3 -0
  13. afeasibilitystudyofansweragnosticquestiongenerationforeducation/4c27db59-2741-43a3-814f-ccad633391c7_content_list.json +3 -0
  14. afeasibilitystudyofansweragnosticquestiongenerationforeducation/4c27db59-2741-43a3-814f-ccad633391c7_model.json +3 -0
  15. afeasibilitystudyofansweragnosticquestiongenerationforeducation/4c27db59-2741-43a3-814f-ccad633391c7_origin.pdf +3 -0
  16. afeasibilitystudyofansweragnosticquestiongenerationforeducation/full.md +219 -0
  17. afeasibilitystudyofansweragnosticquestiongenerationforeducation/images.zip +3 -0
  18. afeasibilitystudyofansweragnosticquestiongenerationforeducation/layout.json +3 -0
  19. afewshotsemanticparserforwizardofozdialogueswiththeprecisethingtalkrepresentation/a24cb7b8-f045-4e39-a5ee-ff571c730d3e_content_list.json +3 -0
  20. afewshotsemanticparserforwizardofozdialogueswiththeprecisethingtalkrepresentation/a24cb7b8-f045-4e39-a5ee-ff571c730d3e_model.json +3 -0
  21. afewshotsemanticparserforwizardofozdialogueswiththeprecisethingtalkrepresentation/a24cb7b8-f045-4e39-a5ee-ff571c730d3e_origin.pdf +3 -0
  22. afewshotsemanticparserforwizardofozdialogueswiththeprecisethingtalkrepresentation/full.md +526 -0
  23. afewshotsemanticparserforwizardofozdialogueswiththeprecisethingtalkrepresentation/images.zip +3 -0
  24. afewshotsemanticparserforwizardofozdialogueswiththeprecisethingtalkrepresentation/layout.json +3 -0
  25. agraphenhancedbertmodelforeventprediction/a3318d62-e30c-46fd-9e73-e5438c9559e1_content_list.json +3 -0
  26. agraphenhancedbertmodelforeventprediction/a3318d62-e30c-46fd-9e73-e5438c9559e1_model.json +3 -0
  27. agraphenhancedbertmodelforeventprediction/a3318d62-e30c-46fd-9e73-e5438c9559e1_origin.pdf +3 -0
  28. agraphenhancedbertmodelforeventprediction/full.md +388 -0
  29. agraphenhancedbertmodelforeventprediction/images.zip +3 -0
  30. agraphenhancedbertmodelforeventprediction/layout.json +3 -0
  31. alignedweightregularizersforpruningpretrainedneuralnetworks/e672d957-667e-4af7-808d-b371f76f81f2_content_list.json +3 -0
  32. alignedweightregularizersforpruningpretrainedneuralnetworks/e672d957-667e-4af7-808d-b371f76f81f2_model.json +3 -0
  33. alignedweightregularizersforpruningpretrainedneuralnetworks/e672d957-667e-4af7-808d-b371f76f81f2_origin.pdf +3 -0
  34. alignedweightregularizersforpruningpretrainedneuralnetworks/full.md +265 -0
  35. alignedweightregularizersforpruningpretrainedneuralnetworks/images.zip +3 -0
  36. alignedweightregularizersforpruningpretrainedneuralnetworks/layout.json +3 -0
  37. amrdadataaugmentationbyabstractmeaningrepresentation/7e6fbf01-bac7-4955-b27c-0dad5aedf719_content_list.json +3 -0
  38. amrdadataaugmentationbyabstractmeaningrepresentation/7e6fbf01-bac7-4955-b27c-0dad5aedf719_model.json +3 -0
  39. amrdadataaugmentationbyabstractmeaningrepresentation/7e6fbf01-bac7-4955-b27c-0dad5aedf719_origin.pdf +3 -0
  40. amrdadataaugmentationbyabstractmeaningrepresentation/full.md +357 -0
  41. amrdadataaugmentationbyabstractmeaningrepresentation/images.zip +3 -0
  42. amrdadataaugmentationbyabstractmeaningrepresentation/layout.json +3 -0
  43. anaccurateunsupervisedmethodforjointentityalignmentanddanglingentitydetection/61085cb9-f4a8-41f8-a8c2-c428a475f780_content_list.json +3 -0
  44. anaccurateunsupervisedmethodforjointentityalignmentanddanglingentitydetection/61085cb9-f4a8-41f8-a8c2-c428a475f780_model.json +3 -0
  45. anaccurateunsupervisedmethodforjointentityalignmentanddanglingentitydetection/61085cb9-f4a8-41f8-a8c2-c428a475f780_origin.pdf +3 -0
  46. anaccurateunsupervisedmethodforjointentityalignmentanddanglingentitydetection/full.md +298 -0
  47. anaccurateunsupervisedmethodforjointentityalignmentanddanglingentitydetection/images.zip +3 -0
  48. anaccurateunsupervisedmethodforjointentityalignmentanddanglingentitydetection/layout.json +3 -0
  49. analyzingdynamicadversarialtrainingdatainthelimit/b9bb3051-d3ff-4ead-a235-514613ae4dbc_content_list.json +3 -0
  50. analyzingdynamicadversarialtrainingdatainthelimit/b9bb3051-d3ff-4ead-a235-514613ae4dbc_model.json +3 -0
abductionrulestrainingtransformerstoexplainunexpectedinputs/093dc7d3-24a5-47b1-9aab-afd431172fcc_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a30c6c3fd4c473b5372242fcf2b4e9091dad466dcbfc82e89b770d0e6725f412
3
+ size 75004
abductionrulestrainingtransformerstoexplainunexpectedinputs/093dc7d3-24a5-47b1-9aab-afd431172fcc_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b4bdb706f16cb90752e867a985fff8497aa3adff19312b4081e1c5a349335192
3
+ size 92659
abductionrulestrainingtransformerstoexplainunexpectedinputs/093dc7d3-24a5-47b1-9aab-afd431172fcc_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ad18c1a185e326289096d451feebab106097e4e846556ab44f97716b7daa5f02
3
+ size 218978
abductionrulestrainingtransformerstoexplainunexpectedinputs/full.md ADDED
@@ -0,0 +1,318 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # AbductionRules: Training Transformers to Explain Unexpected Inputs
2
+
3
+ Nathan Young, Qiming Bao, Joshua Bensemann, Michael Witbrock
4
+
5
+ Strong AI Lab
6
+
7
+ School of Computer Science
8
+
9
+ University of Auckland
10
+
11
+ {nathan.young, josh.bensemann, m.witbrock}@auckland.ac.nz
12
+
13
+ qbao775@aucklanduni.ac.nz
14
+
15
+ # Abstract
16
+
17
+ Transformers have recently been shown to be capable of reliably performing logical reasoning over facts and rules expressed in natural language, but abductive reasoning - inference to the best explanation of an unexpected observation - has been underexplored despite significant applications to scientific discovery, common-sense reasoning, and model interpretability.
18
+
19
+ We present AbductionRules, a group of natural language datasets designed to train and test generalisable abduction over natural-language knowledge bases. We use these datasets to fine-tune pretrained Transformers and discuss their performance, finding that our models learned generalisable abductive techniques but also learned to exploit the structure of our data. Finally, we discuss the viability of this approach to abductive reasoning and ways in which it may be improved in future work.
20
+
21
+ # 1 Introduction
22
+
23
+ Since its introduction, models based on the Transformer (Vaswani et al., 2017) have, due to their learning ability and Turing-completeness (Bhattamishra et al., 2020), sparked research into their use in many applications beyond their original purpose of natural language processing (NLP), including image processing and generation (Parmar et al., 2018; Chen et al., 2020), theorem proving (Polu and Sutskever, 2020; Welleck et al., 2021), and chess (Noever et al., 2020).
24
+
25
+ One such task is logical inference - reasoning over first-order logic (FOL) knowledge bases (collections of facts and rules). Given a knowledge base, one may attempt to find logical implications (deduction), discover rules that extrapolate patterns in known facts (induction), or infer facts that would explain surprising observations (abduction). More specifically, if a newly observed fact $p$ cannot be deduced from an existing knowledge base, abduction is the process of finding one or more facts that,
26
+
27
+ if added to the knowledge base, would allow $p$ to be deduced from existing rules. Figure 1 demonstrates the difference between these three kinds of inference.
28
+
29
+ Traditionally, FOL is represented using a formal mathematical syntax, with facts resembling HUMAN(SOCRATES) and rules resembling $\forall X: \mathrm{HUMAN}(\mathrm{X}) \Rightarrow \mathrm{MORTAL}(\mathrm{X})$ . Clark et al. (2020) recently pioneered an alternative approach we call natural-language logic, which might represent these as "Socrates is human" and "Humans are mortal". This approach, properly followed, retains the precision of the mathematical syntax while also taking advantage of Transformers' NLP aptitude and pretraining. This approach also allows reasoning over texts not written in formal representations.
30
+
31
+ Clark et al. (2020) examined their models' potential for deduction only. Tafjord et al. (2021) extended this work to explore abduction but retained a focus on deduction.
32
+
33
+ Our goal is to train Transformers to perform abductive reasoning with the following properties:
34
+
35
+ - Natural: Operate over natural language.
36
+ - Generalisable: Be able to apply techniques outside domains in which they were learned.
37
+ - Generative: Produce explanations rather than labelling them as sufficient or insufficient.
38
+ - Single-hop: Produce direct explanations. Instead of "plants are green because chlorophyll is green because green light is not used in photosynthesis", prefer "plants are green because chlorophyll is green". If further explanation is desired, abduction can be applied again.
39
+ - Discerning: Prefer simpler explanations.
40
+ - Explicit: Use given knowledge bases rather than relying on pretraining.
41
+
42
+ <table><tr><td>Deduction:</td><td colspan="3">Socrates is human → Humans are mortal → ?</td></tr><tr><td>Induction:</td><td colspan="3">Socrates is human → ? → Socrates is mortal</td></tr><tr><td>Abduction:</td><td colspan="3">? → Humans are mortal → Socrates is mortal</td></tr></table>
43
+
44
+ Figure 1: A comparison of deduction, induction, and abduction, as attempts to reconstruct different parts of the same line of FOL reasoning. Note that only deduction is fully reliable, induction may go in either direction in this case, and only abduction produces new knowledge.
45
+
46
+ Our efforts to train abduction in this way are motivated by multiple potential applications.
47
+
48
+ - Ray (2007) describes the use of automated abduction in scientific discovery. Since much scientific knowledge exists in the form of natural language rather than formal representations, advances in natural-language abduction would greatly assist in automating the scientific method by helping to explain experimental observations.
49
+ - Ignatiev et al. (2019) describe the use of abduction to interpret deep learning models similar to Transformers, which are infamously difficult to interpret.
50
+ - Abduction may also help solve the longstanding problem of automating common-sense reasoning. Transformers excel at memorising common knowledge but routinely fail to capture any underlying reasoning. Training these models to explain their own outputs may remedy this problem by providing a way to integrate this fractured knowledge into a more connected model of reality.
51
+
52
+ We present the following contributions:
53
+
54
+ - A collection of datasets for training and testing natural-language abduction.
55
+ - A method of synthetically generating more realistic natural-language logic datasets.
56
+ - Experimental results showing that Transformers can perform abductive reasoning without additional architecture.
57
+
58
+ # 2 Related Work
59
+
60
+ # 2.1 Natural-language logic
61
+
62
+ Our work builds on the RuleTaker line of research on natural-language logic. This line began with Clark et al. (2020), who developed RuleTakers to reason deductively over FOL knowledge bases expressed in natural language, judging given facts to
63
+
64
+ be true or false. These achieved promising results but failed to accurately explain their reasoning or generalise to inferences requiring more steps than were seen at training time. PRover (Saha et al., 2020) achieved greater explainability by generating proofs of its answers. Similarly, the Iterative variant of ProofWriter (Tafjord et al., 2021) chained single-hop deductions rather than reasoning through multi-hop deductions all at once, making its reasoning transparent and easily generalisable to unseen depths. multiPRover (Saha et al., 2021) also made use of this iterative approach. The generalisability and interpretability of iterative single-hop reasoning are why we seek to train single-hop abduction.
65
+
66
+ Tafjord et al. (2021) also adapted their deduction-based datasets to train abductive reasoning, achieving success but training multi-hop abduction only, and also requiring models to output every possible explanation. By contrast, we seek to train models to discern between simpler and more complex explanations - for example, to prefer explanations requiring fewer unknown facts.
67
+
68
+ # 2.2 Other adjacent work
69
+
70
+ Bhagavatula et al. (2019) presented two more abduction-based datasets: $\alpha$ -NLI, which tests models' ability to choose which of two hypotheses better explains an observation, and $\alpha$ -NLG, a generative version of the same dataset. These datasets do not give supporting knowledge bases - all background information must come from pretraining. While this is a valuable approach, we seek to investigate how well Transformers can reason over given knowledge bases to incorporate explicit background knowledge.
71
+
72
+ Gontier et al. (2020) investigated Transformers' ability to perform inductive reasoning in natural language, finding them able to extrapolate patterns in given proofs but again unable to generalise to more complex proofs.
73
+
74
+ Saparov and Mitchell (2021) developed an alternative approach to classifying the ProofWriter datasets that does not reason over natural language,
75
+
76
+ instead using a symbolic, Bayesian approach and using abductive reasoning to satisfy constraints. Their models' superior performance demonstrates that while Transformers are effective at logical reasoning, they may benefit from more specialised architecture.
77
+
78
+ # 3 Methodology
79
+
80
+ Prior to our work, there existed no dataset capable of training or testing the kind of abductive reasoning we seek. We therefore present AbductionRules, a natural-language logic dataset designed for this task, and use it to train and test several models based on a pretrained Text-to-Text Transfer Transformer, or T5 (Raffel et al., 2020).
81
+
82
+ # 3.1 Datasets
83
+
84
+ AbductionRules has three main predecessors.
85
+
86
+ # 3.1.1 Rule Reasoning
87
+
88
+ The Rule Reasoning dataset developed by Clark et al. (2020) was, to our knowledge, the first natural-language logic dataset.
89
+
90
+ To create this dataset, FOL predicates (e.g. BIG(LION)) were procedurally generated, entities (LION) and attributes (BIG(X)) were extracted, and templates ("The {entity} is {attribute}") were used to create natural-language logic translations ("The lion is big"). Rules were created similarly (e.g. $\forall X: \mathrm{BIG}(X) \Rightarrow \mathrm{BLUE}(X)$ became "If something is big then it is blue"). Facts and rules were grouped into knowledge bases, each with several questions; the model's task is to label each question true or false.
91
+
92
+ The Rule Reasoning dataset includes knowledge bases in several domains; those in the animal-domain use animals as entities while those in the person-domain use peoples' names. All subsequent datasets similarly use these two domains. The animal-domain includes multi-entity facts (CHASES(LION, MOUSE), or "the lion chases the mouse"). For our purposes, we consider the lion to be the main entity and "chases the mouse" to be an attribute of the lion.
93
+
94
+ # 3.1.2 ParaRules
95
+
96
+ Recognising that their translations of mathematical syntax into natural language were strict and unrealistic (e.g. "Charlie is green. Charlie is rough."), Clark et al. (2020) also produced ParaRules, which contained knowledge bases and questions similar to those in the Rule Reasoning dataset, but were
97
+
98
+ paraphrased into more colloquial language (e.g. "Charlie has green teeth and rough skin"). This approach much better prepares Transformers to reason logically over naturally-occurring texts but requires large amounts of human labour to produce. For this reason, ParaRules is much smaller than the Rule Reasoning dataset.
99
+
100
+ # 3.1.3 PARARULE Plus
101
+
102
+ Seeing the value in RuleTaker's size and ease of production as well as the greater utility of ParaRules, Bao (2021) produced PARARULE Plus, a compromise between the Rule Reasoning dataset and ParaRules that procedurally rephrases all rules during generation by using various templates. PARARULE Plus also avoids eschewing word associations entirely by grouping related attributes (such as "big", "strong", "high" and "huge") and only giving entities attributes from one group. While PARARULE Plus falls short of ParaRules' variety, its greater collection of rephrased rules is highly valuable.
103
+
104
+ # 3.1.4 AbductionRules
105
+
106
+ We adapt the open-source code used to generate PARARULE Plus to create AbductionRules<sup>1</sup>, making the following changes:
107
+
108
+ - Instead of labelling questions (for our purposes, "observations") with "true" or "false", we use the lone fact (or "explanation") that would prove or disprove it.
109
+ - We ensure that no two knowledge bases in the same dataset give the same attributes to the same entities to avoid repeats. This reduces the size of the datasets; to compensate, we increase the number of entities.
110
+ - While each rule has a single condition in PARARULE Plus ("If something is cute, then..."), we give three ("If something is cute, funny, and adorable, then..."), with an entity that satisfies exactly two conditions; the model must identify the third.
111
+
112
+ After making these changes, we produce datasets with increasing levels of complexity.
113
+
114
+ - The first complexity level contains no further changes from PARARULE Plus and yields the dataset Abduction-Animal-0.1.
115
+
116
+ Context(Facts+Rules):
117
+
118
+ Facts: The squirrel is quiet. The leopard is slow. The dog is adorable. The crocodile is heavy. The leopard is boring. The leopard is angry. The crocodile is awful. The leopard attacks the squirrel. The dog is small. The dog is cute. The squirrel is nice. The crocodile likes the dog. The squirrel is kind.
119
+
120
+ Rules: If something is cute, is adorable, and is furry, then it is also lovely. All animals that are obese, are awful, and are heavy, are big. If an animal is fierce, sees the squirrel, and likes the dog, it is tired. Things that are smart, are kind, and are quiet, are also round. If an animal chases the dog, is boring, and attacks the squirrel, then it is also strong. All things that are slow, are sleepy, and are angry, are rough.
121
+
122
+ Observation: The squirrel is round. Explanation: The squirrel is smart.
123
+
124
+ Figure 2: An example observation, explanation, and corresponding context from Abduction-Animal-Simple. The model must output the explanation given the context and observation as input. Facts and rules used to explain the observation are bolded while relevant attributes are highlighted.
125
+
126
+ - At the second complexity level, we shuffle all knowledge bases to prevent models from exploiting the constant position of all sentences and attributes. This yields the dataset Abduction-Animal-0.2.
127
+ - At the third complexity level, we procedurally rephrase rules with random variations instead of using the same templates as PARARULE Plus. For example, the animal-domain FOL rule $\forall X: (\mathrm{BIG(X}) \land \mathrm{HEAVY(X)} \land \mathrm{FIERCE(X)}) \Rightarrow \mathrm{STRONG(X)}$ might be rephrased as "All animals that are big, are heavy, and are fierce, are also strong" or "If something is heavy, is fierce, and is big, it is strong", among many other similar variations. Notably, this rephrasing process involves reordering all attributes so that attributes contained in correct abductions might be first, second, or third. This yields the datasets Abduction-Animal-Simple and Abduction-Person-Simple.
128
+ Figure 2 contains an example item from Animal-Simple.
129
+
130
+ This method of procedural rule rephrasing represents a useful iteration on the natural-language logic approach and leaves room for further improvement. Concentrated work in this line of research may produce synthetic natural-language logic datasets that are larger yet exhibit much wider variety, making this approach more powerful and robust.
131
+
132
+ - At the fourth and final complexity level, we add extraneous confounding rules to knowledge bases. While lower complexity levels only ever have one rule that could explain a given observation, here we create two variations of every (single-entity) rule; one replaces a satisfied condition with an unsatisfied condition, while the other replaces all three conditions. All replacements come from different pools. This yields the datasets Abduction-Animal and Abduction-Person.
133
+
134
+ Figure 3 contains simplified examples of data from each complexity level.
135
+
136
+ We intend each successive complexity level to remove additional idiosyncrasies that might be exploited in lieu of using abduction (i.e. used to "cheat"), so that this exploitation can be detected. We also intend the fourth to train models to favour simpler explanations when strictly more complex explanations are available.
137
+
138
+ # 3.2 Experiments
139
+
140
+ We use AbductionRules to train 8 models based on the pretrained T5 implementation from the HuggingFace Transformers library (Wolf et al., 2020).<sup>3</sup>
141
+
142
+ We first use each training set to train 1 model, yielding 6 models trained at 4 complexity levels across 2 domains. To compare domains and complexity levels, we test all models on all test sets, giving us intra-domain results (isolating the effect of the complexity), and inter-domain results (some isolating the effect of the domain). We expect each successive complexity level to train a better-quality model and the two domains to be mostly comparable with some variation attributable to the animal-domain's multi-entity facts.
143
+
144
+ If our approach were adapted to models extensively trained to reason on many domains, we expect that teaching abduction in every domain would
145
+
146
+ <table><tr><td>Initial</td><td>Shuffled</td><td>Rephrased</td><td>Confounded</td></tr><tr><td>The cat is round.</td><td>The cat is smart.</td><td>The cat is smart.</td><td>The cat is smart.</td></tr><tr><td>The cat is smart.</td><td>If something is round,smart, and quiet, thenit is kind.</td><td>All animals that areround, are smart, andare quiet, are also kind.</td><td>All animals that areround, are smart, andare quiet, are also kind.</td></tr><tr><td>If something is round,smart, and quiet, thenit is kind.</td><td>The cat is round.</td><td>The cat is round.</td><td>The cat is round.If an animal is round,is boring, and is quiet,it is kind.</td></tr></table>
147
+
148
+ Figure 3: A diagram demonstrating the successive changes we make to the AbductionRules knowledge bases.
149
+
150
+ be prohibitively expensive. Therefore, we seek to investigate Transformers' ability to transfer abductive reasoning techniques to domains where these techniques have not been taught but are nonetheless familiar to the Transformer. To this end, we train two more multi-domain models.
151
+
152
+ - We train one model on our simplest dataset and our most complex dataset in another domain, i.e. Animal-0.1 and Person. We name this model Person+Animal-0.1.
153
+ - We train another model on the simplest person-domain dataset and the most complex animal-domain dataset, i.e. Person-Simple and Animal, to compare the two domains. We name this model Animal+Person-Simple.
154
+
155
+ While we are interested in these multi-domain models' performance on all datasets, we are particularly interested in their results on the most complex dataset on which they were not trained (Abduction-Animal and Abduction-Person, respectively). We treat performance on these datasets as a proxy for Transformers' ability to apply abductive reasoning outside the domains in which it was trained.
156
+
157
+ Finally, we use the existing pretrained T5 model with no additional training as our baseline model.
158
+
159
+ # 4 Results
160
+
161
+ Table 1 contains our results, showing the percentage of abductions correctly performed by each model on each test set.
162
+
163
+ Note that no model ever gave a correct answer in a domain on which it was not trained. On the surface, this would suggest that our models were
164
+
165
+ unable to generalise to new domains. However, inspection of inter-domain results shows that this is not entirely accurate; many explanations contain errors but nonetheless identify the ground-truth explanation. For example, the animal models commonly appended "The" to correct explanations, as in "The Bob is small"; while this is incorrect, it nonetheless indicates the correct explanation in a way that suggests the model still performed the correct abduction. We distinguish between two kinds of errors in correct-yet-useful explanations: lossless errors and lossy errors.
166
+
167
+ # 4.1 Lossless errors
168
+
169
+ Explanations with lossless errors failed to match the correct explanation character-for-character but allowed it to be reliably identified.
170
+
171
+ We found several ways in which recognisably correct explanations differed from the ground-truth, such as extra words ("The Bob is small", "The lion is attacks the mouse"), looping ("The dog is is is is is small"), and incorrect grammar ("The anne is wealthy"). While these errors point towards flaws in training, it is a strength of natural-language logic and soft reasoners that they can cope with minor grammar mistakes as long as meaning is preserved.
172
+
173
+ Table 2 contains our results after correcting for these errors. Note that animal-domain models achieved performance comparable to the person-domain models on novel datasets in their own domain, while person-domain models saw minimal inter-domain improvement. multi-domain models also saw almost no improvement, suggesting that having seen correct explanations in both domains eliminated this kind of formatting error.
174
+
175
+ # 4.2 Lossy errors
176
+
177
+ The most important aspect of abduction in our datasets is identification of the correct attribute.
178
+
179
+ <table><tr><td>Model \ Test set</td><td>Animal-0.1</td><td>Animal-0.2</td><td>Animal-Simple</td><td>Animal</td><td>Person-Simple</td><td>Person</td></tr><tr><td>Untrained</td><td>0.0%</td><td>0.0%</td><td>0.0%</td><td>0.0%</td><td>0.0%</td><td>0.0%</td></tr><tr><td>Abduction-Animal-0.1</td><td>100.0%</td><td>99.3%</td><td>48.0%</td><td>28.8%</td><td>0.0%</td><td>0.0%</td></tr><tr><td>Abduction-Animal-0.2</td><td>100.0%</td><td>100.0%</td><td>37.7%</td><td>23.2%</td><td>0.0%</td><td>0.0%</td></tr><tr><td>Abduction-Animal-Simple</td><td>100.0%</td><td>100.0%</td><td>100.0%</td><td>50.1%</td><td>0.0%</td><td>0.0%</td></tr><tr><td>Abduction-Animal</td><td>92.6%</td><td>93.5%</td><td>94.1%</td><td>100.0%</td><td>0.0%</td><td>0.0%</td></tr><tr><td>Abduction-Person-Simple</td><td>0.0%</td><td>0.0%</td><td>0.0%</td><td>0.0%</td><td>100.0%</td><td>25.6%</td></tr><tr><td>Abduction-Person</td><td>0.0%</td><td>0.0%</td><td>0.0%</td><td>0.0%</td><td>26.8%</td><td>100.0%</td></tr><tr><td>Person+Animal-0.1</td><td>100.0%</td><td>100.0%</td><td>76.7%</td><td>85.5%</td><td>92.9%</td><td>100.0%</td></tr><tr><td>Animal+Person-Simple</td><td>99.1%</td><td>99.1%</td><td>99.4%</td><td>100.0%</td><td>100.0%</td><td>99.8%</td></tr></table>
180
+
181
+ Table 1: Performance of all models on all test sets. Test sets corresponding to training sets are bolded.
182
+
183
+ <table><tr><td>Model \ Test set</td><td>Animal-0.1</td><td>Animal-0.2</td><td>Animal-Simple</td><td>Animal</td><td>Person-Simple</td><td>Person</td></tr><tr><td>Untrained</td><td>0.0%</td><td>0.0%</td><td>0.0%</td><td>0.0%</td><td>0.0%</td><td>0.0%</td></tr><tr><td>Abduction-Animal-0.1</td><td>100.0% (-)</td><td>99.3% (-)</td><td>48.0% (-)</td><td>28.8% (-)</td><td>13.2% (+13.2%)</td><td>10.1% (+10.1%)</td></tr><tr><td>Abduction-Animal-0.2</td><td>100.0% (-)</td><td>100.0% (-)</td><td>38.5% (+0.9%)</td><td>23.6% (+0.4%)</td><td>9.6% (+9.6%)</td><td>5.7% (+5.7%)</td></tr><tr><td>Abduction-Animal-Simple</td><td>100.0% (-)</td><td>100.0% (-)</td><td>100.0% (-)</td><td>50.1% (-)</td><td>34.4% (+34.4%)</td><td>7.0% (+7.0%)</td></tr><tr><td>Abduction-Animal</td><td>92.6% (-)</td><td>93.5% (-)</td><td>94.2% (+0.0%)</td><td>100.0% (-)</td><td>25.0% (+25.0%)</td><td>36.5% (+36.5%)</td></tr><tr><td>Abduction-Person-Simple</td><td>1.5% (+1.5%)</td><td>1.3% (+1.3%)</td><td>0.9% (+0.9%)</td><td>0.3% (+0.3%)</td><td>100.0% (-)</td><td>25.6% (-)</td></tr><tr><td>Abduction-Person</td><td>0.0% (-)</td><td>0.0% (-)</td><td>0.0% (-)</td><td>0.0% (-)</td><td>26.8% (-)</td><td>100.0% (-)</td></tr><tr><td>Person+Animal-0.1</td><td>100.0% (-)</td><td>100.0% (-)</td><td>76.7% (-)</td><td>85.5% (-)</td><td>92.9% (-)</td><td>100.0% (-)</td></tr><tr><td>Animal+Person-Simple</td><td>99.1% (-)</td><td>99.1% (-)</td><td>99.4% (-)</td><td>100.0% (-)</td><td>100.0% (-)</td><td>99.8% (-)</td></tr></table>
184
+
185
+ Table 2: Improvement of all models on all test sets after allowing lossless errors.
186
+
187
+ The entity at the beginning of the explanation always matches that at the beginning of the observation; therefore, if the correct attribute is identified, the correct explanation can be reconstructed.
188
+
189
+ Table 3 contains our results after correcting for these errors. Note that every model achieved some useful results on every test set. Most inter-domain results improved to rival intra-domain results, although the Abduction-Person model continued to struggle. Intra-domain results saw minimal improvement, with none seeing a $>2\%$ point increase. The multi-domain models again saw no visible improvement, further suggesting that these inferior results were avoidable from seeing facts, rules, and explanations in different formats at training time.
190
+
191
+ # 5 Discussion
192
+
193
+ Our results show that models trained on our simplest datasets struggle to generalise to new complexity levels and domains, while those trained on our more complex datasets are better able to generalise but still perform suboptimally. Meanwhile, those trained on combined multi-domain datasets achieve performance superior to the sum of models trained on their parts and easily apply skills outside domains in which they were learned. It is also clear that models trained in the animal-domain achieve better intra-domain and inter-domain performance than person-domain models.
194
+
195
+ The untrained T5 model performs abysmally and merits little discussion, indicating that this abduction task is non-trivial (at least in the way we present it here).
196
+
197
+ # 5.1 Animal-0.1 and Animal-0.2
198
+
199
+ Unsurprisingly, the models trained on our simplest datasets fare the worst of our trained models. Our Animal-0.1 and -0.2 models perform similarly poorly, suggesting that Animal-0.2's additional complexity from randomised sentence orderings was of minimal importance. In fact, the Animal-0.2 model's performance on more complex datasets is worse than its simpler counterpart; examination of its results reveals a tendency to loop on unfamiliar inputs. Given the Animal-0.1 model's $99.3\%$ correct (100% allowing lossy errors) performance on Animal-0.2, we treat these complexity levels as equivalent and the Animal-0.1 model as definitive.
200
+
201
+ The Animal-0.1 model is approximately 1/3 as accurate on the person-domain when allowing lossless errors but only loses approximately $6\%$ points when allowing lossy errors, suggesting that it fails to adapt to new formats but is mostly able to use the same techniques as in the animal-domain.
202
+
203
+ These models' significant performance hit on higher complexity levels clearly indicates that they exploit the structure of their training set. However,
204
+
205
+ <table><tr><td>Model \ Test set</td><td>Animal-0.1</td><td>Animal-0.2</td><td>Animal-Simple</td><td>Animal</td><td>Person-Simple</td><td>Person</td></tr><tr><td>Untrained</td><td>0.0%</td><td>0.2%</td><td>0.1%</td><td>0.0%</td><td>0.0%</td><td>0.0%</td></tr><tr><td>Abduction-Animal-0.1</td><td>100.0% (-)</td><td>100.0% (+0.7%)</td><td>48.4% (+0.4%)</td><td>29.1% (+0.3%)</td><td>41.9% (+28.8%)</td><td>23.5% (+13.4%)</td></tr><tr><td>Abduction-Animal-0.2</td><td>100.0% (-)</td><td>100.0% (-)</td><td>39.4% (+0.8%)</td><td>24.5% (+0.9%)</td><td>29.4% (+19.8%)</td><td>14.3% (+8.6%)</td></tr><tr><td>Abduction-Animal-Simple</td><td>100.0% (-)</td><td>100.0% (-)</td><td>100.0% (-)</td><td>50.1% (-)</td><td>66.4% (+32.0%)</td><td>14.9% (+7.9%)</td></tr><tr><td>Abduction-Animal</td><td>92.6% (-)</td><td>93.5% (-)</td><td>94.2% (-)</td><td>100.0% (-)</td><td>39.1% (+14.1%)</td><td>62.7% (+26.2%)</td></tr><tr><td>Abduction-Person-Simple</td><td>39.8% (+38.4%)</td><td>42.8% (+41.5%)</td><td>39.6% (+38.7%)</td><td>11.5% (+11.2%)</td><td>100.0% (-)</td><td>25.6% (-)</td></tr><tr><td>Abduction-Person</td><td>5.2% (+5.2%)</td><td>5.0% (+5.0%)</td><td>4.9% (+4.9%)</td><td>15.8% (+15.8%)</td><td>26.8% (-)</td><td>100.0% (-)</td></tr><tr><td>Person+Animal-0.1</td><td>100.0% (-)</td><td>100.0% (-)</td><td>76.7% (-)</td><td>85.6% (+0.0%)</td><td>92.9% (-)</td><td>100.0% (-)</td></tr><tr><td>Animal+Person-Simple</td><td>99.1% (-)</td><td>99.1% (-)</td><td>99.4% (-)</td><td>100.0% (-)</td><td>100.0% (-)</td><td>99.8% (-)</td></tr></table>
206
+
207
+ Table 3: Improvement of all models on all test sets after allowing lossy errors.
208
+
209
+ it should be noted that the Animal-0.1 model drops each time by approximately a factor of 2. If this model only chose the penultimate attribute in a sentence containing the attribute in the question, its accuracy would drop by a factor of 3 with procedural rephrasing and again with confounding rules. Therefore, both models utilise some level of generalised abductive reasoning.
210
+
211
+ # 5.2 Animal-Simple and Person-Simple
212
+
213
+ The Animal-Simple model significantly outperforms our simpler models; this makes sense since Animal-0.1 and -0.2 can be thought of as special, unshuffled cases of Animal-Simple. Similarly to the Animal-0.1 model, the Animal-Simple model performs about half as well on Animal as on Animal-Simple. This model also performs worse on Person-Simple than Animal when allowing lossless errors but better when allowing lossy errors, implying that it exploits the structure of Animal-Simple to some degree to identify correct attributes. Its performance drop from Person-Simple to Person is greater than from Animal-Simple to Animal, suggesting that changes in domain and complexity are more difficult to generalise when compounded.
214
+
215
+ Our Person-Simple model also performs well but fails to generalise to higher complexity; this can be partially explained by the multi-entity facts in the animal-domain, as rules using these facts are not used to create confounding rules. This model gives almost no correct inter-domain explanations unless lossy errors are allowed, in which case it achieves similar inter-domain performance to the animal-domain models. Its performance drop on Animal can be compared to that of the Animal-Simple model from Person-Simple to Person, exacerbated by the person-domain models' poorer performance in general.
216
+
217
+ # 5.3 Abduction-Animal and Abduction-Person
218
+
219
+ The Animal model performs the best of all single-domain models, achieving $>60\%$ performance on all datasets except Person-Simple when allowing lossy errors. The drop from Person to Person-Simple is evidence of cheating, but its generalisability is superior to all other models and demonstrates some abductive ability. Surprisingly, it achieves worse intra-domain results on lower complexity levels than the Animal-Simple model, again indicating that some of its performance is dependent on Animal's rule structure. Still, this performance drop is relatively small (being $< 10\%$ in all cases), further reinforcing that while this model utilises some degree of both cheating and abduction (like all our models), its abductive capabilities generalise to a promising extent.
220
+
221
+ By contrast, the Person model achieves the worst performance of any model, performing as well on Person-Simple as that dataset's model does on Person and achieving abysmal inter-domain performance, even on Animal. This model is the clearest indication that (our instantiations of) the two domains are not equivalent; the animal-domain's models are much better able to generalise. The multi-entity rules again offer some explanatory power - the Animal model demonstrates some overtraining on the confounding rules and so performs more poorly in their absence, but still learned to explain observations using multi-entity rules that lacked confounding equivalents, making it robust to extraneous rules but not reliant on them. If this were a major determining factor, we would expect models trained on both maximally and minimally complex datasets to be even more robust and generalised.
222
+
223
+ # 5.4 Multi-domain models
224
+
225
+ Our multi-domain models are our best-performing models by far, achieving superior performance on unseen datasets than the sum of models trained on
226
+
227
+ their combined training sets' parts.
228
+
229
+ The Person+Animal-0.1 model, being trained on our simplest dataset and having its most complex training set come from the worse of our two training domains, is the worse of our two multi-domain models. Nonetheless, it reaches a remarkable level of performance, explaining $>76\%$ of all observations correctly on all test sets. Its performance in the face of unconfounded rephrased rules (something unprecedented in its training) is dependent on the domain. In the person-domain (i.e. on Person-Simple), where it received its most complex training, it achieves its best result on a dataset it was not trained on (excepting Animal-0.2), while in the animal-domain (i.e. on Animal-Simple) it achieves its worst result, having not seen any rephrased animal rules at training time. Still, it demonstrates a greater ability than any single-domain model to generalise to these unfamiliar rule structures. It can also apply its training on confounded rules outside the domain in which it was learned, achieving far greater performance on Animal than any other dataset that it was not trained on.
230
+
231
+ The Animal+Person-Simple model is our best and most promising, achieving $>99\%$ performance on every dataset and consistently adapting to all complexity levels in every domain. Like Person+Animal-0.1, it encounters unprecedented rule structures (singular single-entity animal rules, confounded person rules) and generalises almost perfectly to each. While our datasets remain somewhat limited in scope, we believe that this result demonstrates that Transformers can generalise abductive techniques beyond the domains in which those techniques were trained, provided the domain itself is not entirely novel.
232
+
233
+ Extrapolating these multi-domain results, it seems likely that finetuning Transformers that have received extensive pretraining (such as GPT-3 (Brown et al., 2020)) on datasets covering more varied and complex examples of abduction would make these models capable of much more generalised natural-language abductive reasoning.
234
+
235
+ # 6 Conclusion
236
+
237
+ We have presented the AbductionRules datasets and shown that pretrained T5 models finetuned on them exhibit generalised abductive reasoning. Our more complex datasets train abduction more generally and reliably than our less complex datasets. Further, training in multiple domains is superior
238
+
239
+ to training in only one domain, and we have clear evidence of generalisation of techniques from one domain to another.
240
+
241
+ We have presented an innovation in natural-language logic dataset generation, presenting a new middle-ground between the template-based PARARULE Plus (Bao, 2021) and the manually rephrased Pararules (Clark et al., 2020). We believe our results are promising and demonstrate the viability of Transformer-based abduction (and logical reasoning in general), but also indicate opportunities for improvement.
242
+
243
+ # 6.1 Future Work
244
+
245
+ Future work in this area might explore:
246
+
247
+ - Examining skill transfer between different kinds of logical reasoning.
248
+ - Applying abductive techniques in real-world, as opposed to artificial, domains.
249
+ - Generating probability distributions over multiple possible explanations.
250
+ - Testing explanations by verifying that they allow the original observation to be deduced.
251
+ - Explanations that include not only missing premises but the relevant rule(s) they satisfy.
252
+
253
+ # References
254
+
255
+ Qiming Bao. 2021. Pararule plus: A larger deep multi-step reasoning dataset over natural language.
256
+ Chandra Bhagavatula, Ronan Le Bras, Chaitanya Malaviya, Keisuke Sakaguchi, Ari Holtzman, Hannah Rashkin, Doug Downey, Wen-tau Yih, and Yejin Choi. 2019. Abductive commonsense reasoning. In International Conference on Learning Representations.
257
+ Satwik Bhattamishra, Arkil Patel, and Navin Goyal. 2020. On the computational power of transformers and its implications in sequence modeling. In Proceedings of the 24th Conference on Computational Natural Language Learning, pages 455-475.
258
+ Tom B Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. arXiv preprint arXiv:2005.14165.
259
+ Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, and Ilya Sutskever. 2020. Generative pretraining from pixels. In International Conference on Machine Learning, pages 1691-1703. PMLR.
260
+
261
+ Peter Clark, Oyvind Tafjord, and Kyle Richardson. 2020. Transformers as soft reasoners over language. In Proceedings of the Twenty-Ninth International Joint Conference on Artificial Intelligence (IJCAI-20), pages 3882-3890.
262
+ Nicolas Gontier, Koustuv Sinha, Siva Reddy, and Chris Pal. 2020. Measuring systematic generalization in neural proof generation with transformers. Advances in Neural Information Processing Systems, 33.
263
+ Alexey Ignatiev, Nina Narodytska, and Joao Marques-Silva. 2019. Abduction-based explanations for machine learning models. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 1511-1519.
264
+ David Noever, Matt Ciolino, and Josh Kalin. 2020. The chess transformer: Mastering play using generative language models. arXiv preprint arXiv:2008.04057.
265
+ Niki Parmar, Ashish Vaswani, Jakob Uszkoreit, Lukasz Kaiser, Noam Shazeer, Alexander Ku, and Dustin Tran. 2018. Image transformer. In International Conference on Machine Learning, pages 4055-4064. PMLR.
266
+ Stanislas Polu and Ilya Sutskever. 2020. Generative language modeling for automated theorem proving. arXiv preprint arXiv:2009.03393.
267
+ Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. 2020. Exploring the limits of transfer learning with a unified text-to-text transformer. Journal of Machine Learning Research, 21:1-67.
268
+ Oliver Ray. 2007. Automated abduction in scientific discovery. In Model-Based Reasoning in Science, Technology, and Medicine, pages 103-116. Springer.
269
+ Swarnadeep Saha, Sayan Ghosh, Shashank Srivastava, and Mohit Bansal. 2020. Prover: Proof generation for interpretable reasoning over rules. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 122-136.
270
+ Swarnadeep Saha, Prateek Yadav, and Mohit Bansal. 2021. multiPRover: Generating multiple proofs for improved interpretability in rule reasoning. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 3662-3677, Online. Association for Computational Linguistics.
271
+ Abulhair Saparov and Tom M Mitchell. 2021. A generative symbolic model for more general natural language understanding and reasoning. arXiv preprint arXiv:2105.02486.
272
+
273
+ Oyvind Tafjord, Bhavana Dalvi, and Peter Clark. 2021. ProofWriter: Generating implications, proofs, and abductive statements over natural language. In *Findings of the Association for Computational Linguistics: ACL-IJCNLP* 2021, pages 3621–3634, Online. Association for Computational Linguistics.
274
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information processing systems, pages 5998-6008.
275
+ Sean Welleck, Jiacheng Liu, Ronan Le Bras, Hannaneh Hajishirzi, Yejin Choi, and Kyunghyun Cho. 2021. Naturalproofs: Mathematical theorem proving in natural language. arXiv preprint arXiv:2104.01112.
276
+ Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Remi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander Rush. 2020. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Association for Computational Linguistics.
277
+
278
+ # Appendices
279
+
280
+ # A Rephrasing method
281
+
282
+ Table 4 demonstrates the method we used to rephrase rules in our more complex datasets. Our method made several binary phrasing choices to decide between 16 possible templates, providing more internal variety than PARARULE Plus but less than ParaRules. As well as this random variation, all 3 conditions were shuffled, giving 6 possible orderings and 96 total possible rephrasings.
283
+
284
+ # B Lossless errors
285
+
286
+ The following encompasses all errors we considered
287
+ lossless - i.e. close enough to the ground truth answer to be reasonably counted as correct.
288
+
289
+ - Unnecessary inclusion of 'the', as in "The Bob is small."
290
+ - Omission of 'the', as in "Cat is smart."
291
+ - Unnecessary inclusion of 'is', as in "The lion is attacks the mouse."
292
+ - Omission of 'is', as in "The squirrel funny."
293
+ - Inclusion of words that are never included in our answers, specifically 'and', 'are', and 'a'.
294
+
295
+ <table><tr><td>Plural?</td><td>Specific?</td><td>Also?</td><td>Then/All?</td><td>Example rephrasing</td></tr><tr><td>×</td><td>×</td><td>×</td><td>×</td><td>If something is big, is heavy, and is fierce, it is strong.</td></tr><tr><td>×</td><td>×</td><td>×</td><td>✓</td><td>If something is big, is heavy, and is fierce, then it is strong.</td></tr><tr><td>×</td><td>×</td><td>✓</td><td>×</td><td>If something is big, is heavy, and is fierce, it is also strong.</td></tr><tr><td>×</td><td>×</td><td>✓</td><td>✓</td><td>If something is big, is heavy, and is fierce, then it is also strong.</td></tr><tr><td>×</td><td>✓</td><td>×</td><td>×</td><td>If an animal is big, is heavy, and is fierce, it is strong.</td></tr><tr><td>×</td><td>✓</td><td>×</td><td>✓</td><td>If an animal is big, is heavy, and is fierce, then it is strong.</td></tr><tr><td>×</td><td>✓</td><td>✓</td><td>×</td><td>If an animal is big, is heavy, and is fierce, it is also strong.</td></tr><tr><td>×</td><td>✓</td><td>✓</td><td>✓</td><td>If an animal is big, is heavy, and is fierce, then it is also strong.</td></tr><tr><td>✓</td><td>×</td><td>×</td><td>×</td><td>Things that are big, are heavy, and are fierce, are strong.</td></tr><tr><td>✓</td><td>×</td><td>×</td><td>✓</td><td>All things that are big, are heavy, and are fierce, are strong.</td></tr><tr><td>✓</td><td>×</td><td>✓</td><td>×</td><td>Things that are big, are heavy, and are fierce, are also strong.</td></tr><tr><td>✓</td><td>×</td><td>✓</td><td>✓</td><td>All things that are big, are heavy, and are fierce, are also strong.</td></tr><tr><td>✓</td><td>✓</td><td>×</td><td>×</td><td>Animals that are big, are heavy, and are fierce, are strong.</td></tr><tr><td>✓</td><td>✓</td><td>×</td><td>✓</td><td>All animals that are big, are heavy, and are fierce, are strong.</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>×</td><td>Animals that are big, are heavy, and are fierce, are also strong.</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>All animals that are big, are heavy, and are fierce, are also strong.</td></tr></table>
296
+
297
+ Table 4: The 16 rephrasing templates produced by our four binary phrasing choices, shown for an example animal-domain rule.
298
+
299
+ - Renaming the entity to better resemble training examples; for example, person-domain models sometimes replaced 'the crocodile' with 'Cro' while animal-domain models replaced 'Bob' with 'the bobster'.
300
+ - Looping the correct answer or some part thereof, as in "The dog is is is is small." or "The rabbit is rabbit is adorable."
301
+ - Incorrect capitalisation, as in "The anne is wealthy."
302
+ - Omission of spaces, as in "Thebob is small."
303
+
304
+ # C Abduction-Person-Simple example
305
+
306
+ Figure 4 contains an example item from Abduction-Person-Simple, similarly to Figure 2's example from Abduction-Animal-Simple.
307
+
308
+ # Context(Facts+Rules):
309
+
310
+ Facts: Anne is dull. Dave is nice. Erin is tiny. Fiona is high. Fiona is strong. Erin is small. Dave is clever. Fiona is heavy. Anne is sad. Anne is rough. Erin is thin.
311
+
312
+ Rules: All things that are big, are high, and are strong, are also huge. If something is poor, is small, and is nice, it is also huge. All things that are high, are rough, and are little, are also smart. All things that are clever, are quiet, and are dull, are smart. People that are big, are dull, and are clever, are also short. If a person is thin, is small, and is little, that person is short. If a person is thin, is strong, and is quiet, that person is imperfect. Things that are little, are small, and are nice, are short. If a person is high, is poor, and is rough, then that person is also imperfect. All things that are thin, are big, and are strong, are also huge. If something is clever, is nice, and is quiet, then it is smart. If a person is poor, is rough, and is dull, then that person is imperfect.
313
+
314
+ Question: Fiona is huge.
315
+
316
+ Label: Fiona is big.
317
+
318
+ Figure 4: An example observation, explanation, and corresponding context from Abduction-Person-Simple. The model must output the explanation given the context and observation as input. Facts and rules used to explain the observation are bolded while relevant attributes are highlighted.
abductionrulestrainingtransformerstoexplainunexpectedinputs/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:619d10eec2c25c2c593094c92930ae84b7144ef98f2da5c9f4339652802ecefe
3
+ size 484515
abductionrulestrainingtransformerstoexplainunexpectedinputs/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a174b7ad4b5dbc5c3e98bdbb2f21e8e3a4d641fbe6b290c88bbe4f7c7475b98a
3
+ size 306657
addressingresourceandprivacyconstraintsinsemanticparsingthroughdataaugmentation/135f70eb-7ce0-4fca-9ddb-ba80e70d3dc4_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:db92330c22ff0007d8e8733276febda5b6884cb06797bbf7f440ad03983fdcc4
3
+ size 66651
addressingresourceandprivacyconstraintsinsemanticparsingthroughdataaugmentation/135f70eb-7ce0-4fca-9ddb-ba80e70d3dc4_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1f26c9929bc320d69892daf021eb87fd71903ae74b7d099174de43412a48c82a
3
+ size 80080
addressingresourceandprivacyconstraintsinsemanticparsingthroughdataaugmentation/135f70eb-7ce0-4fca-9ddb-ba80e70d3dc4_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8882cf84d67f6a7e52bb1506767cae68d5dfc46a99a35190c33a253640eb236d
3
+ size 738357
addressingresourceandprivacyconstraintsinsemanticparsingthroughdataaugmentation/full.md ADDED
@@ -0,0 +1,263 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Addressing Resource and Privacy Constraints in Semantic Parsing Through Data Augmentation
2
+
3
+ Kevin Yang $^{1,*}$ Olivia Deng $^{2}$ Charles Chen $^{2}$ Richard Shin $^{2}$ Subhro Roy $^{2}$ Benjamin Van Durme $^{2}$
4
+
5
+ <sup>1</sup>UC Berkeley, <sup>2</sup>Microsoft Semantic Machines yangk@berkeley.edu, sminfo@microsoft.com
6
+
7
+ # Abstract
8
+
9
+ We introduce a novel setup for low-resource task-oriented semantic parsing which incorporates several constraints that may arise in real-world scenarios: (1) lack of similar datasets/models from a related domain, (2) inability to sample useful logical forms directly from a grammar, and (3) privacy requirements for unlabeled natural utterances. Our goal is to improve a low-resource semantic parser using utterances collected through user interactions. In this highly challenging but realistic setting, we investigate data augmentation approaches involving generating a set of structured canonical utterances corresponding to logical forms, before simulating corresponding natural language and filtering the resulting pairs. We find that such approaches are effective despite our restrictive setup: in a low-resource setting on the complex SMCalFlow calendaring dataset (Andreas et al., 2020), we observe $33\%$ relative improvement over a non-data-augmented baseline in top-1 match.
10
+
11
+ # 1 Introduction
12
+
13
+ We aim to improve the performance of a semantic parser based on previous user interactions, but without making use of their direct utterances, nor any associated personal identifiable information (PII). Such privacy requirements are common in practical deployment (Kannan et al., 2016), and semantic parsers are commonly used in real-world systems such as Siri and Alexa, converting natural language into structured queries to be executed downstream (Kamath and Das, 2018).
14
+
15
+ Constructing semantic parsers can be expensive: annotating examples consisting of natural language-logical form pairs often requires trained experts. Two complementary lines of work has pursued this concern. First, several works (Zhong et al., 2020; Cao et al., 2020) tackle low-resource semantic parsing via approaches such as data augmentation. A
16
+
17
+ <table><tr><td>Natural</td><td>When is Allison&#x27;s birthday?</td></tr><tr><td>Logical</td><td>(Yield :output (start (singleton (results FindEventWrapperWithDefaults :constraint (Constraint[Event] :subject (? = #(String “Allison&#x27;s birthday”)))))))</td></tr><tr><td>Canonical</td><td>start time of find event called something like “Allison&#x27;s birthday”</td></tr></table>
18
+
19
+ Table 1: An example of natural language, logical form, and canonical form in the SMCalFlow domain. The event title, "Allison's birthday," is private information.
20
+
21
+ second line of work (Wang et al., 2015; Xiao et al., 2016) explores canonical utterances: structured language which maps directly to logical forms, but resembles natural language (Table 1). The use of canonical forms as the target of semantic parsing has shown benefits in accuracy (Shin et al., 2021; Wu et al., 2021).
22
+
23
+ We consider low-resource semantic parsing with further resource and privacy constraints which may arise in practical deployment: beyond a small gold dataset of labeled pairs, we assume only unlabeled natural utterances which must be masked for PII. Unlike many prior works, we assume that (1) we do not have a large dataset of related logical forms in a different domain, (2) we cannot sample arbitrarily many useful logical forms, and (3) we must the preserve privacy of user utterances.
24
+
25
+ We propose several approaches which are compatible with our imposed restrictions, broadly following three steps: (1) generate a set of privacy-preserving canonical utterances; (2) simulate corresponding natural utterances; and (3) filter the resulting canonical-natural utterance pairs to yield additional "silver" data for training. We more than double the performance of a non-data-augmented baseline on the ATIS domain (Hemphill et al., 1990), and achieve a $33\%$ relative improvement on the more realistic SMCalFlow domain (Andreas et al., 2020). We hope these experiments help motivate further research interest in parser improvement for realistic scenarios.
26
+
27
+ # 2 Semantic Parsing in Practice
28
+
29
+ Our setup assumes access exclusively to:
30
+
31
+ 1. a small "seed" dataset $\mathcal{D}$ of natural utterance with corresponding parses, and
32
+ 2. a larger set $\mathcal{U}$ of unlabeled natural utterances, for which PII must be masked before use.
33
+
34
+ In a real-world setting, one might hand-associate the seed dataset $\mathcal{D}$ to train a system for initial deployment, while then leveraging $\mathcal{U}$ to refine a future version of the system.
35
+
36
+ While our setting is highly restrictive, we argue that it reflects practical constraints. For example, in practice, the grammar for logical forms—as well as the synchronous context-free grammar (SCFG) that maps them to canonical utterances—will often be written from scratch, precluding transfer learning methods which leverage a large quantity of similar data in another domain. Moreover, in complex domains, one cannot expect to sample useful logical forms directly from a grammar if the grammar is designed for coverage as in e.g., SMCalFlow (Andreas et al., 2020). Therefore, other than $\mathcal{D}$ , the only additional data (excluding additional manual annotation) are subsequent user inputs in the form of $\mathcal{U}$ , with PII masked to preserve privacy.
37
+
38
+ # 3 Related Work
39
+
40
+ Compared to prior work in low-resource semantic parsing, our task setup's constraints require different approaches.
41
+
42
+ First, we consider semantic parsing on an entirely new grammar for logical forms, rather than adapting to new domains starting from a preexisting grammar (Zhao et al., 2019; Zhong et al., 2020; Burnyshev et al., 2021; Kim et al., 2021; Tseng et al., 2021). For example, Zhong et al. (2020) takes a natural-language-to-SQL model for one database to propose language-SQL training examples for another database.
43
+
44
+ Second, we assume one cannot sample useful canonical utterances directly from the grammar, unlike Zhong et al. (2020) and Cao et al. (2020). For example, Cao et al. (2020) use a backtranslation-esque approach leveraging large numbers of unlabeled natural and canonical utterances.
45
+
46
+ Moreover, we do not even assume direct access to unlabeled natural utterances, due to real-world privacy considerations (Kannan et al., 2016; Campaigna et al., 2017). Many works on low-resource
47
+
48
+ ![](images/81fe50c7536129ff9f12e3dc85b2486b8298b3b7f83b60550cc45a643e6dbd62.jpg)
49
+ Figure 1: Illustration of one of our proposed methods for data augmentation (USER-RANK) in low-resource semantic parsing. We first obtain canonical forms from unlabeled user data using a parser trained on seed data, replacing PII. Next, we simulate corresponding natural language for the generated canonical forms. Finally, we filter the canonical-natural pairs to obtain our final silver data pairs for augmentation.
50
+
51
+ semantic parsing, such as those mentioned previously, do not consider the privacy aspect.
52
+
53
+ Nevertheless, recent work (Shin et al., 2021; Wu et al., 2021; Yin et al., 2021; Schucher et al., 2021) has demonstrated decent performance given just a small seed dataset $\mathcal{D}$ , by combining pretrained language models with constrained decoding. For example, Shin et al. (2021) use only 300 labeled examples in the complex SMCalFlow dialogue domain (Andreas et al., 2020). However, using pretrained models to directly generate silver training data, with a method such as DINO (Schick and Schütze, 2021), is unsuitable in semantic parsing: the models are unaware of either the underlying grammar or the space of parsable queries. One of our contributions is to explore more effective uses of pretrained models for data augmentation in a practical semantic parsing scenario.
54
+
55
+ Finally, the detection of PII in user data is an applied topic of interest (Pilán et al., 2022), such as for safely summarizing call transcripts (Transcribe) or the automatic detection of doxing (Karimi et al., 2022). In our work we implement a solution meant as a proof of concept for our exploration, based on detecting and replacing named entities.
56
+
57
+ # 4 Practical Augmentation
58
+
59
+ While finetuning a pretrained model on the seed dataset $\mathcal{D}$ can yield a reasonable parser $P$ (Shin et al., 2021; Wu et al., 2021), we aim to increase performance via data augmentation. However, our realistic setup precludes many prior approaches. We propose to generate silver data via three main steps, shown in Figure 1: (1) generate a set $\mathcal{C}$ of canonical utterances $c$ , (2) simulate a set $\mathcal{N}$ of corresponding natural utterances $n$ , and (3) filter the resulting $(c,n)$ pairs. We suggest multiple approaches for these steps, and benchmark their efficacy in Sec. 5. The entire procedure can be iterated multiple times as the parser improves.
60
+
61
+ # 4.1 Generating Canonical Utterances
62
+
63
+ First, we generate canonical utterances $c$ . In principle, one could sample directly from a task-specific grammar, but the results may not be useful in practice (Sec. 5). The remaining options are to generate $c$ conditioned on either unlabeled natural utterances $\mathcal{U}$ or the seed data $\mathcal{D}$ .
64
+
65
+ Generation conditioned on $\mathcal{U}$ (USER). We need to mask all PII, but this is difficult to guarantee in the original natural language domain. Therefore, we first train a parser $P$ on $\mathcal{D}$ , and parse each utterance in $\mathcal{U}$ to obtain a set of canonical utterances $\mathcal{C}'$ . In the more structured domain of $\mathcal{C}'$ we can guarantee masking and replacing all PII to yield the final set $\mathcal{C}$ . Critically, it is not necessary that the initial $\mathcal{C}'$ are correct parses of $\mathcal{U}$ ; we only need a realistic distribution over canonical utterances, and the initial $\mathcal{U}$ is no longer parallel to the final $\mathcal{C}$ anyway due to replacing PII. Hence it is acceptable if the parser $P$ 's errors are numerous but unbiased. In any case, the final $\mathcal{C}$ will be somewhat tied to the true distribution of user utterances in $\mathcal{U}$ .
66
+
67
+ Generation conditioned on $\mathcal{D}$ (GPT). A second method of generating $\mathcal{C}$ is SCFG-constrained decoding on an autoregressive language model, prompting with the seed data $\mathcal{D}$ . Specifically, we prompt with a random concatenation of plans from $\mathcal{D}$ , separated by newlines. The SCFG that defines canonical utterances constrains the decoding, forcing the model to output a valid canonical utterance.
68
+
69
+ # 4.2 Simulated Natural Utterances
70
+
71
+ For each canonical $c$ in $\mathcal{C}$ , we now re-generate a natural utterance $n$ . While other methods (e.g., finetuning) are possible, here we employ a prompting approach using GPT3 (Brown et al., 2020). We use a prompt containing $\mathcal{D}$ 's canonical-natural pairs, ending with the canonical utterance $c$ for which we want to sample a corresponding $n$ .
72
+
73
+ # 4.3 Filtering Silver Data
74
+
75
+ Many $(c, n)$ pairs we generate may be low-quality, depending on the task and seed data $\mathcal{D}$ available. To obtain more high-quality pairs, we simulate 20 natural utterances $n$ for each $c$ . We must then filter the resulting pairs, which we do based on either reranking or cycle consistency.
76
+
77
+ Reranking (RANK). We accept the best of 20 simulated $n$ for each $c$ , and add this $(c, n)$ to our training data. The reranker combines two scores: (1) the log-probability that the original $\mathcal{D}$ -trained parser $P$ parses $n$ back to the original canonical $c$ , and (2) the edit distance between $n$ and $c$ (capped based on the length of $c$ ), which should intuitively be maximized to encourage linguistic diversity in the augmented data, perhaps at a small accuracy cost.
78
+
79
+ Cycle consistency (CYC). We accept a $(c, n)$ pair if the original parser $P$ parses $n$ back to $c$ . This assures the resulting pairs' quality, but may skew the distribution toward easier examples, which are less helpful in downstream training.
80
+
81
+ # 5 Experiments
82
+
83
+ Tasks. We evaluate on two domains, both English:
84
+
85
+ 1. ATIS (Hemphill et al., 1990), a flight booking dataset. We use the Break (Wolfson et al., 2020) subset. $^{2}$
86
+ 2. SMCalFlow (Andreas et al., 2020), a calendaring dataset, which we view as the most complex and realistic.
87
+
88
+ In each domain, we assume a seed data $\mathcal{D}$ of just 30 pairs, conducting several trials with different random samples of seed data to mitigate noise from this selection. We sample 300 unlabeled natural utterances $\mathcal{U}$ from the dataset, which must be parsed to canonical forms (using the grammar and SCFG of Shin et al. (2021)) and then PII-masked
89
+
90
+ before use. Our implementation of PII masking is based on recognizing and replacing named entities; see Appendix A for further details.
91
+
92
+ Methods. We evaluate several methods on each task, listed below.
93
+
94
+ 1. BASE, a supervised baseline which finetunes BART (Lewis et al., 2019) on the seed $\mathcal{D}$ following Shin et al. (2021), discarding $\mathcal{U}$ .
95
+ 2. USER-RANK, a data augmentation approach following the USER and RANK methods described in Sec. 4.1 and 4.3 respectively, and depicted in Figure 1.
96
+ 3. GPT-RANK, a similar approach which generates $c$ following GPT from Sec. 4.1 instead.
97
+ 4. USER-CYC, a version which filters $(c,n)$ pairs via cycle consistency (Sec. 4.3).
98
+ 5. GRAM-RANK, a weak baseline that samples initial $c$ directly from the grammar, which we run only on SMCalFlow since our ATIS grammar is too loosely specified for sampling.
99
+
100
+ Results. We observe that our best data augmentation methods (USER-RANK, GPT-RANK) double the performance of the baseline finetuning method BASE on ATIS, and outperform it on SMCalFlow by up to $20\%$ relative gain (Table 2).<sup>3</sup> Nonetheless, absolute performance remains low due to the tiny amount of seed data, although we note that the exact match metric may be unnecessarily harsh, penalizing some semantically equivalent parses.
101
+
102
+ Of interest is that GPT-RANK outperforms BASE despite using only the seed $\mathcal{D}$ , and not extra unlabeled $\mathcal{U}$ . Moreover, iterating the data augmentation procedure (USER-RANK-3X, GPT-RANK-3X) can further improve performance compared to BASE ($150\%$ relative on ATIS, $33\%$ relative on SMCalFlow), by improving the initial parser $P$ used for parsing unlabeled $\mathcal{U}$ or for filtering pairs $(c,n)$ , although we observed in preliminary experiments that further iterations yielded diminishing benefits.
103
+
104
+ In contrast, USER-CYC performs poorly on ATIS, indicating that the CYC filtering is perhaps too restrictive for certain domains. Even on SMCalFlow where performance is decent in comparison, the successful cycles are overwhelmingly for relatively trivial canonical utterances (e.g., "Hello! How are you?"). We additionally observe that nearly one-third of cycles are successful, much
105
+
106
+ <table><tr><td>Method</td><td>ATIS</td><td>SMCalFlow</td></tr><tr><td>BASE</td><td>6.8 ± 3.5</td><td>13.2 ± 3.4</td></tr><tr><td>USER-RANK</td><td>13.4 ± 4.1</td><td>15.5 ± 3.7</td></tr><tr><td>GPT-RANK</td><td>13.7 ± 3.2</td><td>15.9 ± 2.7</td></tr><tr><td>USER-CYC</td><td>6.0 ± 2.3</td><td>15.0 ± 4.0</td></tr><tr><td>GRAM-RANK</td><td></td><td>13.4 ± 2.8</td></tr><tr><td>USER-RANK-3X</td><td>17.3 ± 1.3</td><td>17.6 ± 4.6</td></tr><tr><td>GPT-RANK-3X</td><td>16.7 ± 3.5</td><td>16.1 ± 3.0</td></tr></table>
107
+
108
+ more than the actual validation set accuracy of $15\%$ , indicating that our auto-generated user utterances remain less challenging and diverse compared to real user utterances. Meanwhile, GRAM-RANK is no better than the unaugmented baseline BASE: sampling plans directly from a grammar is ineffective in a complex, realistic domain like SMCalFlow.
109
+
110
+ # 5.1 Analysis
111
+
112
+ We conduct additional analyses on SMCalFlow.
113
+
114
+ Reranking. First, we run ablations on reranking in USER-RANK (Table 3). While our edit distance heuristic described in Sec. 4.3 makes little difference, reranking of some form is crucial. Meanwhile, there are many possibilities for other reranking procedures.
115
+
116
+ Table 2: Main results on ATIS and SMCalFlow for different methods. Top-1 parsing match percentage evaluated over 5 (ATIS) or 10 (SMCalFlow) trials on different seed datasets $\mathcal{D}$ . For the two highest-performing methods, USER-RANK and GPT-RANK, we iterate data augmentation 3 times on SMCalFlow, yielding USER-RANK-3X and GPT-RANK-3X. USER-RANK-3X performs best overall.
117
+
118
+ <table><tr><td>Method</td><td>SMCalFlow</td></tr><tr><td>BASE</td><td>13.2 ± 3.4</td></tr><tr><td>USER-RANK-3X</td><td>17.6 ± 4.6</td></tr><tr><td>USER-NOEDITRANK-3X</td><td>17.3 ± 4.7</td></tr><tr><td>USER-NORANK</td><td>12.8 ± 3.5</td></tr></table>
119
+
120
+ Table 3: SMCalFlow reranking ablations. Since the version without reranking (USER-NORANK) is no better than the baseline, we do not iterate the data augmentation procedure. The edit distance heuristic makes little difference in this case (USER-NOEDITRANK-3X vs. USER-RANK-3X), but reranking is crucial.
121
+
122
+ Effect of Masking PII. We rerun our full pipeline for USER-RANK on SMCalFlow, removing only
123
+
124
+ the step where we resample PII, in order to isolate the effect of doing so (Table 4, USER-RANK-KEEPPII). As one might expect, replacing PII hurts performance, albeit slightly. Of course, if PII is not a concern, then many other data augmentation schemes from prior work become possible again.
125
+
126
+ <table><tr><td>Method</td><td>SMCalFlow</td></tr><tr><td>USER-RANK</td><td>15.5 ± 3.7</td></tr><tr><td>USER-RANK-KEEPPII</td><td>16.2 ± 2.8</td></tr></table>
127
+
128
+ Additional Seed Data. We explore using a larger seed dataset $\mathcal{D}$ on both ATIS and SMCalFlow (90 and 100 data points respectively, instead of 30). On SMCalFlow, we observe that USER-RANK's gains over the baseline largely disappear (Table 5). Thus, improved data augmentation methods which still yield gains with larger seed datasets are an important direction for future exploration.
129
+
130
+ Table 4: SMCalFlow ablation where we do not resample PII. As expected, performance is slightly better if we do not need to resample PII.
131
+
132
+ <table><tr><td>Method</td><td>ATIS (90 seed)</td><td>SMCalFlow (100 seed)</td></tr><tr><td>BASE</td><td>21.4 ± 1.8</td><td>31.6 ± 0.3</td></tr><tr><td>USER-RANK</td><td>21.4 ± 1.7</td><td>31.7 ± 1.0</td></tr></table>
133
+
134
+ Examples and Error Analysis. Finally, in Table 6 we show several SMCalFlow example parses by the baseline BASE compared to our highest-performing method USER-RANK-3X. Compared to BASE, USER-RANK-3X is often better at segmenting names (Example 1), and is also more likely to be semantically similar to the gold parse in cases where BASE is wildly incorrect (Example 2). Nevertheless, in the latter example, USER-RANK-3X is still marked wrong, suggesting that our exact match metric may somewhat underrepresent the performance of all models. Finally, both methods struggle on more complex and/or composite intents (Example 3). Additional examples illustrating these phenomena are shown in Appendix B.
135
+
136
+ Table 5: Results with more seed data. We use a seed dataset $\mathcal{D}$ of size 90 (ATIS) or 100 (SMCalFlow) rather than 30, with 3 trials per method. The gains from data augmentation largely disappear at this scale, so we do not do additional augmentation iterations.
137
+
138
+ <table><tr><td colspan="2">Example 1</td></tr><tr><td colspan="2">Previous Agent</td></tr><tr><td>Natural</td><td>please make a meet with my doctor sarah</td></tr><tr><td>BASE</td><td>create event with &quot; doctor&quot;</td></tr><tr><td>USER-RANK-3X</td><td>create event with &quot; doctor sarah&quot;</td></tr><tr><td>Gold Canonical</td><td>create event with &quot; doctor sarah&quot;</td></tr><tr><td colspan="2">Example 2</td></tr><tr><td>Previous Agent</td><td>Let me know if there&#x27;s anything else I can help you with.</td></tr><tr><td>Natural</td><td>no</td></tr><tr><td>BASE</td><td>does there exist an event tomorrow 9 military</td></tr><tr><td>USER-RANK-3X</td><td>Looks good!</td></tr><tr><td>Gold Canonical</td><td>Thanks for your help!</td></tr><tr><td colspan="2">Example 3</td></tr><tr><td colspan="2">Previous Agent</td></tr><tr><td>Natural</td><td>Please accept the bowling fundraiser and tell sammy I will bring refreshments.</td></tr><tr><td>BASE</td><td>create event called &quot; bowling fundraiser&quot; starting month 4 11 2019 5 PM</td></tr><tr><td>USER-RANK-3X</td><td>create event called &quot; bowling fundraiser&quot;</td></tr><tr><td>Gold Canonical</td><td>respond Accepted with comment &quot;I will bring refreshments&quot; to find event called something like &quot; bowling fundraiser&quot;</td></tr></table>
139
+
140
+ Table 6: Example parses by the baseline BASE and our best method USER-RANK-3X on SMCalFlow. Each example contains the previous agent utterance (if it exists) and user utterance in the first two lines, followed by the BASE parse, USER-RANK-3X parse, and gold parse.
141
+
142
+ # 6 Conclusion
143
+
144
+ We have discussed a challenging setting for low resource semantic parsing based on real-world resource and privacy constraints. In addition to a seed dataset, the only resources allowed are unlabeled natural utterances which must be PII-masked. We observe that data augmentation approaches leveraging pretrained language models can still improve over supervised baselines which use only the seed dataset. At the same time, substantial room remains for improvement: there are many alternatives to our reranking procedure for silver data, and our method loses some effectiveness when more labeled data is provided. We hope that our exploratory observations help lay a foundation for further work in realistic data augmentation approaches for semantic parsing.
145
+
146
+ # Ethical Considerations
147
+
148
+ We believe our work makes a positive impact by focusing heavily on the need for privacy considerations when exploring low-resource settings for semantic parsing. However, as our methods rely heavily on large pretrained language models such as GPT3, we may inherit similar biases which such models are known for (Brown et al., 2020).
149
+
150
+ # Acknowledgements
151
+
152
+ We thank the rest of the team at Semantic Machines, as well as our anonymous reviewers, for their support and helpful comments which aided us greatly in improving the paper. We also thank the NSF for their support through a fellowship to the first author.
153
+
154
+ # References
155
+
156
+ Jacob Andreas, John Bufe, David Burkett, Charles Chen, Josh Clausman, Jean Crawford, Kate Crim, Jordan DeLoach, Leah Dorner, Jason Eisner, et al. 2020. Task-oriented dialogue as dataflow synthesis. Transactions of the Association for Computational Linguistics, 8:556-571.
157
+ Tom B Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. arXiv preprint arXiv:2005.14165.
158
+ Pavel Burnyshev, Valentin Malykh, Andrey Bout, Ekaterina Artemova, and Irina Pionkovskaya. 2021. A single example can improve zero-shot data generation. arXiv preprint arXiv:2108.06991.
159
+ Giovanni Campagna, Rakesh Ramesh, Silei Xu, Michael Fischer, and Monica S Lam. 2017. Almond: The architecture of an open, crowdsourced, privacy-preserving, programmable virtual assistant. In Proceedings of the 26th International Conference on World Wide Web, pages 341-350.
160
+ Ruisheng Cao, Su Zhu, Chenyu Yang, Chen Liu, Rao Ma, Yanbin Zhao, Lu Chen, and Kai Yu. 2020. Unsupervised dual paraphrasing for two-stage semantic parsing. arXiv preprint arXiv:2005.13485.
161
+ Dheeru Dua, Yizhong Wang, Pradeep Dasigi, Gabriel Stanovsky, Sameer Singh, and Matt Gardner. 2019. Drop: A reading comprehension benchmark requiring discrete reasoning over paragraphs. arXiv preprint arXiv:1903.00161.
162
+ Charles T Hemphill, John J Godfrey, and George R Doddington. 1990. The atis spoken language systems pilot corpus. In Speech and Natural Language: Proceedings of a Workshop Held at Hidden Valley, Pennsylvania, June 24-27, 1990.
163
+
164
+ Matthew Honnibal, Ines Montani, Sofie Van Landeghem, and Adriane Boyd. 2020. spaCy: Industrial-strength Natural Language Processing in Python.
165
+ Aishwarya Kamath and Rajarshi Das. 2018. A survey on semantic parsing. arXiv preprint arXiv:1812.00978.
166
+ Anjuli Kannan, Karol Kurach, Sujith Ravi, Tobias Kaufmann, Andrew Tomkins, Balint Miklos, Greg Corrado, Laszlo Lukacs, Marina Ganea, Peter Young, et al. 2016. Smart reply: Automated response suggestion for email. In Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pages 955-964.
167
+ Younes Karimi, Anna Squicciarini, and Shomir Wilson. 2022. Automated detection of doxing on twitter. arXiv preprint arXiv:2202.00879.
168
+ Sungdong Kim, Minsuk Chang, and Sang-Woo Lee. 2021. Neuralwoz: Learning to collect task-oriented dialogue via model-based simulation. arXiv preprint arXiv:2105.14454.
169
+ Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov, and Luke Zettlemoyer. 2019. Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. arXiv preprint arXiv:1910.13461.
170
+ Ildiko Pilán, Pierre Lison, Lilja Øvrelid, Anthi Papadopoulou, David Sánchez, and Montserrat Batet. 2022. The text anonymization benchmark (tab): A dedicated corpus and evaluation framework for text anonymization. arXiv preprint arXiv:2202.00443.
171
+ Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. 2019. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9.
172
+ Timo Schick and Hinrich Schütze. 2021. Generating datasets with pretrained language models. arXiv preprint arXiv:2104.07540.
173
+ Nathan Schucher, Siva Reddy, and Harm de Vries. 2021. The power of prompt tuning for low-resource semantic parsing. arXiv preprint arXiv:2110.08525.
174
+ Richard Shin, Christopher H Lin, Sam Thomson, Charles Chen, Subhro Roy, Emmanouil Antonios Platanios, Adam Pauls, Dan Klein, Jason Eisner, and Benjamin Van Durme. 2021. Constrained language models yield few-shot semantic parsers. arXiv preprint arXiv:2104.08768.
175
+ Alane Suhr, Stephanie Zhou, Ally Zhang, Iris Zhang, Huajun Bai, and Yoav Artzi. 2018. A corpus for reasoning about natural language grounded in photographs. arXiv preprint arXiv:1811.00491.
176
+
177
+ Amazon Transcribe. Amazon Transcribe now supports redaction of personal identifiable information (PII) for streaming transcriptions. Accessed: 2022-03-08.
178
+ Bo-Hsiang Tseng, Yinpei Dai, Florian Kreyssig, and Bill Byrne. 2021. Transferable dialogue systems and user simulators. arXiv preprint arXiv:2107.11904.
179
+ Yushi Wang, Jonathan Berant, and Percy Liang. 2015. Building a semantic parser overnight. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 1332-1342.
180
+ Tomer Wolfson, Mor Geva, Ankit Gupta, Matt Gardner, Yoav Goldberg, Daniel Deutch, and Jonathan Berant. 2020. Break it down: A question understanding benchmark. Transactions of the Association for Computational Linguistics, 8:183-198.
181
+ Shan Wu, Bo Chen, Chunlei Xin, Xianpei Han, Le Sun, Weipeng Zhang, Jiansong Chen, Fan Yang, and Xunliang Cai. 2021. From paraphrasing to semantic parsing: Unsupervised semantic parsing via synchronous semantic decoding. arXiv preprint arXiv:2106.06228.
182
+ Chunyang Xiao, Marc Dymetman, and Claire Gardent. 2016. Sequence-based structured prediction for semantic parsing. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1341-1350.
183
+ Pengcheng Yin, John Wieting, Avirup Sil, and Graham Neubig. 2021. On the ingredients of an effective zero-shot semantic parser. arXiv preprint arXiv:2110.08381.
184
+ Zijian Zhao, Su Zhu, and Kai Yu. 2019. Data augmentation with atomic templates for spoken language understanding. arXiv preprint arXiv:1908.10770.
185
+ Victor Zhong, Mike Lewis, Sida I Wang, and Luke Zettlemoyer. 2020. Grounded adaptation for zero-shot executable semantic parsing. arXiv preprint arXiv:2009.07396.
186
+
187
+ # A Masking and Replacing Personal Identifiable Information
188
+
189
+ # A.1 ATIS
190
+
191
+ The ATIS grammar is somewhat loosely defined and does not clearly indicate the instances of PII. This would be problematic in a real production setting due to making it difficult to guarantee masking out all PII. However, for our experiments we simply truecase the data and apply named entity recognition using spaCy (Honnibal et al., 2020), which we find is highly successful from a qualitative inspection. We treat detected named entities as PII.
192
+
193
+ To remove PII, we devise two methods: 1. masking and 2. generating entirely new plans. In 1., we apply the above method to detect PII, mask it with the entity type, and ask GPT3 to infill (Figure 2). In 2., corresponding to our GPT-RANK method, we feed GPT3 example plans from the seed data and ask for an entirely new plan that does not contain PII (Figure 3).
194
+
195
+ # Truecase
196
+
197
+ Show me all the flights arriving in Charlotte around next Monday please
198
+
199
+ # NER recognition + mask
200
+
201
+ Show me all the flights arriving in [GPE] around [TIME] please
202
+
203
+ # GPT3 fill based on entity type
204
+
205
+ Show me all the flights arriving in Austin around 5 pm please
206
+
207
+ Figure 2: An example of infill masking PII.
208
+
209
+ # A.2 SMCalFlow
210
+
211
+ Since the SMCalFlow grammar (described in detail in Appendix A.2 of Shin et al. (2021)) is type-annotated, we define three categories of PII: names, event titles, and locations. Each category is easily identifiable from the logical form, so it suffices to sample a new value from the same category in the logical form to guarantee that PII is replaced.
212
+
213
+ We sample names from a distribution balanced for ethnicity and gender. For event titles and locations, we sample them from GPT3 by prompting with seed data canonical forms containing event titles and/or locations, and then prefixing the generation with find event called something like " (event titles) or a mix of weather at " and find event at " (locations). We cut off the generation once the next " appears.
214
+
215
+ INPUT: return ground transportation;return #1 in boston;return #2 between the airport;return #3 and downtown
216
+
217
+ return flights ;return #1 that are nonstop ;return #2 from san diego ;return #3 to new york
218
+
219
+ return flights ;return #1 from denver ;return #2 to pittsburgh ;return #3 leaving after 6pm ;return #3 leaving before 7pm ;return #1 of both #4 and #5
220
+
221
+ GPT3: return flights ;return #1 that are nonstop ;return #2 between denver ;return #3 and oakland ;return #4 leaving after noon ;return #5 arriving after 5pm
222
+
223
+ Figure 3: INPUT is an example prompt, consisting of several plans from the seed data, given to GPT3 to generate new plans to begin the data augmentation procedure in GPT-RANK.
224
+
225
+ # B Example Parsing
226
+
227
+ We include some additional example parses for USER-RANK-3X compared to BASE on SMCalFlow, as well as GPT-RANK-3X compared to BASE on ATIS, when both start with the same seed data (Tables 7, 8, 9).
228
+
229
+ We additionally provide some qualitative analysis on the SMCalFlow examples. Most examples that both methods get correct are relatively simple intents (Example 1). However, there are also many examples where one or both methods output a parse which a human might judge semantically equivalent to the true parse (Examples 2 and 3), suggesting that our exact match metric might underrepresent the true strength of the models.
230
+
231
+ The next few examples illustrate cases in which USER-RANK-3X improves over BASE, for example by being better at segmenting names (Example 4). BASE is also sometimes just wildly incorrect on examples where USER-RANK-3X matches the gold exactly, or at least closely in meaning (Examples 5 and 6). Of course, given the tiny amount of initial training data, USER-RANK-3X is not immune to these types of errors either (Example 7).
232
+
233
+ <table><tr><td colspan="2">Example 1</td></tr><tr><td>Previous Agent</td><td>I&#x27;ve deleted your event matching &quot;doctor&#x27;s appointment&quot;.</td></tr><tr><td>Natural</td><td>thank you</td></tr><tr><td>BASE</td><td>Thanks for your help!</td></tr><tr><td>USER-RANK-3X</td><td>Thanks for your help!</td></tr><tr><td>Gold Canonical</td><td>Thanks for your help!</td></tr><tr><td colspan="2">Example 2</td></tr><tr><td>Previous Agent</td><td>Ok, I won&#x27;t do anything for now. Let me know what else I can help you with.</td></tr><tr><td>Natural</td><td>Make an appointment for tomorrow from 1pm to 6pm</td></tr><tr><td>BASE</td><td>create event starting tomorrow 1 PM to 6 PM</td></tr><tr><td>USER-RANK-3X</td><td>create event starting tomorrow 1 PM to 6 PM</td></tr><tr><td>Gold Canonical</td><td>create event starting tomorrow 1 PM ending 6 PM after that datetime</td></tr><tr><td colspan="2">Example 3</td></tr><tr><td>Previous Agent</td><td>Next is your tour potential development sites on Thursday the 28th from 3:00 to 5:00 PM.</td></tr><tr><td>Natural</td><td>I want to delete that one.</td></tr><tr><td>BASE</td><td>delete the event</td></tr><tr><td>USER-RANK-3X</td><td>delete find event</td></tr><tr><td>Gold Canonical</td><td>delete the event</td></tr><tr><td colspan="2">Example 4</td></tr><tr><td>Previous Agent</td><td>Does one of these work?</td></tr><tr><td>Natural</td><td>When is Easter next year?</td></tr><tr><td>BASE</td><td>start time of find event called something like &quot; Easter next year&quot;</td></tr><tr><td>USER-RANK-3X</td><td>start time of find event called something like &quot; Easter&quot; starting next year</td></tr><tr><td>Gold Canonical</td><td>Easter next year</td></tr></table>
234
+
235
+ Table 7: Example parses by the baseline BASE and our best method USER-RANK-3X on SMCalFlow.
236
+
237
+ <table><tr><td colspan="2">Example 5</td></tr><tr><td colspan="2">Previous Agent</td></tr><tr><td>Natural</td><td>list to me my calendar please</td></tr><tr><td>BASE</td><td>create event on today afternoon</td></tr><tr><td>USER-RANK-3X</td><td>find event</td></tr><tr><td>Gold Canonical</td><td>find event</td></tr><tr><td colspan="2">Example 6</td></tr><tr><td>Previous Agent</td><td>The &quot;library&quot; is on Monday the 30th from 10:00 to 10:30 AM.</td></tr><tr><td>Natural</td><td>Ok! now tell me when does my Coffee Date start?</td></tr><tr><td>BASE</td><td>ERROR: can&#x27;t answer trivia</td></tr><tr><td>USER-RANK-3X</td><td>start time of find event called something like &quot; Coffee Date&quot;</td></tr><tr><td>Gold Canonical</td><td>start time of find event called something like &quot; Coffee Date&quot;</td></tr><tr><td colspan="2">Example 7</td></tr><tr><td colspan="2">Previous Agent</td></tr><tr><td>Natural</td><td>Hey, I was wondering who the organizer is for the museum event next week.</td></tr><tr><td>BASE</td><td>ERROR: can&#x27;t answer trivia</td></tr><tr><td>USER-RANK-3X</td><td>ERROR: can&#x27;t answer trivia</td></tr><tr><td>Gold Canonical</td><td>organizer of find event called something like &quot; museum&quot; during next week</td></tr></table>
238
+
239
+ Table 8: Additional example parses by the baseline BASE and our best method USER-RANK-3X on SMCalFlow.
240
+
241
+ <table><tr><td colspan="2">Example 8</td></tr><tr><td colspan="2">Previous Agent</td></tr><tr><td>Natural</td><td>I want a flight from houston to memphis on tuesday morning</td></tr><tr><td>BASE</td><td>return flights ;return #1 from houston;return #2 to memphis ;return #3 on tuesday morning</td></tr><tr><td>GPT-RANK-3X</td><td>return flights ;return #1 from houston;return #2 to memphis ;return #3 on tuesday morning</td></tr><tr><td>Gold Canonical</td><td>return flights ;return #1 from houston;return #2 to memphis ;return #3 on tuesday ;return #4 in the morning</td></tr><tr><td colspan="2">Example 9</td></tr><tr><td colspan="2">Previous Agent</td></tr><tr><td>Natural</td><td>What ground transportation is available from the pittsburgh airport to downtown and how much does it cost</td></tr><tr><td>BASE</td><td>return transportation ;return #1 that is ground ;return #2 to downtown pittsburgh; return cost of #4</td></tr><tr><td>GPT-RANK-3X</td><td>return transportation ;return #1 that is ground ;return #2 from the pittsburgh; return #3 to downtown pittsburgh; return cost #4</td></tr><tr><td>Gold Canonical</td><td>return ground transportation ;return #1 which is available ;return #2 from the pittsburgh airport ;return #3 to downtown ;return the cost of #4</td></tr></table>
242
+
243
+ # C Preliminary Experiments on Other Break Subsets
244
+
245
+ We additionally ran preliminary experiments on the DROP (Dua et al., 2019) (reading comprehension) and NLVR2 (Suhr et al., 2018) (language-vision reasoning) subsets of Break (Wolfson et al., 2020). We used a similar setup to our ATIS and SMCalFlow experiments, with 30 initial seed data $\mathcal{D}$ and 300 unlabeled user utterances $\mathcal{U}$ .
246
+
247
+ However, across multiple trials of multiple methods (BASE, USER-RANK, GPT-RANK, USER-CYC), we never observed performance above $2\%$ on either domain. This may be partially due to the diversity of the data; for example, DROP is an amalgamation of data from several sources. However, we hypothesize that this across-the-board poor performance is primarily the result of an SCFG for canonical utterances which results in somewhat unnatural language (Table 10), and that performance could be greatly improved with a better SCFG. Given the current form of our canonical utterances in DROP and NLVR2, it is challenging to learn the task given just 30 seed examples. In comparison, the SMCalFlow canonical utterances (Table 1 in the main text) are much more natural.
248
+
249
+ Table 9: Additional example parses by the baseline BASE and our best method GPT-RANK-3X on ATIS.
250
+
251
+ <table><tr><td>DROP Natural</td><td>Which player had the shortest touchdown reception of the game?</td></tr><tr><td>DROP Canonical</td><td>return touchdown receptions ;return shortest of #1 ;return player of #2</td></tr><tr><td>NLVR2 Natural</td><td>If there are two carts, but only one of them has a canopy.</td></tr><tr><td>NLVR2 Canonical</td><td>return carts ;return number of #1 ;return if #2 is equal to two ;return canopy ;return #1 that has #4 ;return number of #5 ;return if #6 is equal to one ;return if both #3 and #7 are true</td></tr></table>
252
+
253
+ Table 10: Examples of natural utterances with corresponding canonical utterances for DROP and NLVR2 domains. The language of the canonical utterances is relatively unnatural.
254
+
255
+ We additionally inspect some inaccurate example predictions by BASE on DROP and NLVR2, which are often wildly incorrect (Table 11). We also show some example $(c,n)$ pairs generated by our data augmentation procedure, demonstrating the failure to propose good natural language $n$ given the limited data and unnatural canonical $c$ (Table 12).
256
+
257
+ <table><tr><td>DROP Natural</td><td>Which player threw more yards in the game, Young or Manning?</td></tr><tr><td>DROP Top-1 Parse</td><td>return that was the highest ;return that was more of #1 ;return number of #2 for each #1 ;return #1 where #3 is lower than one ;return number of #4</td></tr><tr><td>NLVR2 Natural</td><td>If there are bananas with stickers on them</td></tr><tr><td>NLVR2 Top-1 Parse</td><td>return, ;return number of #1 ;return if #2 is equal to one</td></tr></table>
258
+
259
+ Table 11: Predictions by BASE on DROP and NLVR2 which are wildly incorrect. Our data augmentation methods fare no better.
260
+
261
+ <table><tr><td>DROP Canonical</td><td>return the five</td></tr><tr><td>DROP Simulated Natural</td><td>Fact-checkers failed to catch five factual errors.</td></tr><tr><td>NLVR2 Canonical</td><td>return left image ;return #1 that are dirty ;return if #2 is in one of the images</td></tr><tr><td>NLVR2 Simulated Natural</td><td>If any of the trucks are dirty.</td></tr></table>
262
+
263
+ Table 12: Example simulated natural utterances generated by prompting GPT3 on DROP and NLVR2, after reranking and selecting the best of 20 generations. The correspondence between canonical and simulated natural utterances remains imperfect.
addressingresourceandprivacyconstraintsinsemanticparsingthroughdataaugmentation/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:82be8765998388514c6cb431455a26b4a36b33c7b5b36fc0a95b63fc70af4f10
3
+ size 618281
addressingresourceandprivacyconstraintsinsemanticparsingthroughdataaugmentation/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1c1ac30160f693234cf11884637eafc64f9d8ac96beaae1e1f0478ffc7ba4116
3
+ size 337916
afeasibilitystudyofansweragnosticquestiongenerationforeducation/4c27db59-2741-43a3-814f-ccad633391c7_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9d13eab269eacd29366a96219d74ba8fc10b494f3a8122d4b660c6808fc8839a
3
+ size 58066
afeasibilitystudyofansweragnosticquestiongenerationforeducation/4c27db59-2741-43a3-814f-ccad633391c7_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7b5671cb1dae749407ed0197c2e3744fed6437241c51246aa2afdba603cc41bc
3
+ size 71690
afeasibilitystudyofansweragnosticquestiongenerationforeducation/4c27db59-2741-43a3-814f-ccad633391c7_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6086e85758fda600c1b94e106c06a20cbdbd18cbd8ce55e7c861cb2a9566e032
3
+ size 299330
afeasibilitystudyofansweragnosticquestiongenerationforeducation/full.md ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # A Feasibility Study of Answer-Agnostic Question Generation for Education
2
+
3
+ Liam Dugan, Eleni Miltsakaki, Shriyash Upadhyay, Etan Ginsberg, Hannah Gonzalez, Daytheon Choi, Chuning Yuan, Chris Callison-Burch University of Pennsylvania
4
+
5
+ {ldugan,elenimi,shriyash,etangins,hannahgl,dhchoi,dianacny,ccb}@seas.upenn.edu
6
+
7
+ # Abstract
8
+
9
+ We conduct a feasibility study into the applicability of answer-agnostic question generation models to textbook passages. We show that a significant portion of errors in such systems arise from asking irrelevant or uninterpretable questions and that such errors can be ameliorated by providing summarized input. We find that giving these models human-written summaries instead of the original text results in a significant increase in acceptability of generated questions $(33\% \rightarrow 83\%)$ as determined by expert annotators. We also find that, in the absence of human-written summaries, automatic summarization can serve as a good middle ground.
10
+
11
+ # 1 Introduction
12
+
13
+ Writing good questions that target salient concepts is difficult and time consuming. Automatic Question Generation (QG) is a powerful tool that could be used to significantly lessen the amount of time it takes to write such questions. A QG system that automatically generates relevant questions from textbooks would help professors write quizzes faster and help students stay engaged when reviewing course material.
14
+
15
+ Previous work on QG has focused primarily on answer-aware QG models. These models require the explicit selection of an answer span in the input context, typically through the usage of highlight tokens. This adds significant overhead to the question generation process and is undesirable in cases where clear lists of salient key terms are unavailable. We conduct a feasibility study<sup>1</sup> on the application of answer-agnostic question generation models (ones which do not require manual selection of answer spans) to an educational context. Our contributions are as follows:
16
+
17
+ ![](images/b4387f00a73688c76394fa0087b0dcef14caefae13f9ecc620d7485dd904c256.jpg)
18
+ Figure 1: Relevance, interpretability, and acceptability of generated questions are significantly improved when using human-written summaries (yellow) or automatically-generated summaries (green) as input instead of the original text (red).
19
+
20
+ - We show that the primary way answer-agnostic QG models fail is by generating irrelevant or uninterpretable questions.
21
+ - We show that giving answer-agnostic QG models human-written summaries instead of the original text results in significant increases in question acceptability $(33\% \rightarrow 83\%)$ , relevance $(61\% \rightarrow 95\%)$ , and in-context interpretability $(56\% \rightarrow 94\%)$ .
22
+ - We show that, in absence of human-written summaries, providing automatically generated summaries as input is a good alternative.
23
+
24
+ # 2 Related Work & Background
25
+
26
+ Early attempts to use QG for educational applications involved generating gap-fill or “cloze” questions<sup>2</sup> (Taylor, 1953) from textbooks (Agarwal and Mannem, 2011). This procedure has been shown to be effective in classroom settings (Zavala and Mendoza, 2018) and students’ scores on this style
27
+
28
+ of generated question correlate positively with their scores on human-written questions (Guo et al., 2016). However, there are many situations where gap-fill questions are not effective, as they are only able to ask about specific unambiguous key terms.
29
+
30
+ In recent years, with the advent of large crowdsourced datasets for extractive question answering (QA) such as SQuAD (Rajpurkar et al., 2018), neural models have become the primary methods of choice for generating traditional interrogative style questions (Kurdi et al., 2019). A common task formulation for neural QG is to phrase the task as answer-aware, that is, given a context passage $C = \{c_0,\dots ,c_n\}$ and an answer span within this context $A = \{c_k,\dots ,c_{k + l}\}$ , train a model to maximize $P(Q|A,C)$ where $Q = \{q_{0},\dots,q_{m}\}$ are the tokens in the question. These models are typically evaluated using n-gram overlap metrics such as BLEU/ROUGE/METEOR (Papineni et al., 2002; Lin, 2004; Banerjee and Lavie, 2005) with the reference being the original human-authored question as provided by the extractive QA dataset.
31
+
32
+ The feasibility of using answer-aware neural QG in an educational setting was investigated by Wang et al. (2018), who used a BiLSTM encoder (Zhang et al., 2015) to encode $C$ and $A$ and a unidirectional LSTM decoder to generate $Q$ . They trained on the SQuAD dataset (Rajpurkar et al., 2018) and evaluated on textbooks from various domains (history, biology, etc.). They showed that generated questions were largely grammatical, relevant, and had high n-gram overlap with human-authored questions. However, given that we may not always have a list of key terms to use as answer spans for an input passage, there is a desire to move past answer-aware QG models and evaluate the feasibility of answer-agnostic models for use in education.
33
+
34
+ Shifting to answer-agnostic models creates new challenges. As Vanderwende (2008) claims, the task of deciding what is and is not important is, itself, an important task. Without manually selected answer spans to guide it, an answer-agnostic model must itself decide what is and is not important enough to ask a question about. This is typically done by separately modeling $P(A|C)$ , i.e., which spans in the input context are most likely to be used as answer targets for questions. The extracted answer spans are then given to an answer-aware QG model $P(Q|A,C)$ . This modeling choice allows for more controllable QG and more direct modeling of term salience.
35
+
36
+ ![](images/635997f783388aa69d624c20c1463ef1c5d79aee53b84b42bb8fb46ed930c0bd.jpg)
37
+ Figure 2: Diagram of the model's three different finetuning tasks: Answer extraction, question generation, and question answering
38
+
39
+ Previous work done by Subramanian et al. (2018) trained a BiLSTM Pointer Network (Vinyals et al., 2015) for this answer extraction task and showed that it outperformed an entity-based baseline when predicting answer spans from SQuAD passages. However, their human evaluation centered around question correctness and fluency rather than relevance of answer selection. Similar follow-up studies also fail to explicitly ask annotators whether or not the extracted answers, and subsequent generated questions, were relevant to the broader topic of the context passage (Willis et al., 2019; Cui et al., 2021; Wang et al., 2019; Du and Cardie, 2018; Alberti et al., 2019; Back et al., 2021).
40
+
41
+ In our study, we explicitly ask annotators to determine whether or not a generated question is relevant to the topic of the textbook chapter from which it is generated. In addition, we show that models trained for answer extraction on SQuAD frequently select irrelevant or ambiguous answers when applied to textbook material. We show that summaries of input passages can be used instead of the original text to aid in the modeling of topic salience and that questions generated from human-written and automatically-generated summaries are more relevant, interpretable, and acceptable.
42
+
43
+ # 3 Methodology
44
+
45
+ To perform answer-agnostic QG, we follow work done by Dong et al. (2019) and Bao et al. (2020) who show that language models, when fine-tuned for both QA and QG, perform better than models tuned for only one of those tasks. We assume that answer extraction will aid both QA and QG and thus use a model that was fine-tuned on all three. We considered using UniLM (Bao et al., 2020) or ProphetNet (Qi et al., 2020) but ultimately chose a T5 language model (Raffel et al., 2020) fine-tuned
46
+
47
+ <table><tr><td></td><td>Key-Term Coverage (§5)</td><td>Total # Sents</td><td>Avg. Sent Length</td></tr><tr><td>A1&#x27;s Summary</td><td>77.6%</td><td>279</td><td>17.56</td></tr><tr><td>A2&#x27;s Summary</td><td>80.7%</td><td>243</td><td>19.28</td></tr><tr><td>A3&#x27;s Summary</td><td>53.4%</td><td>148</td><td>15.37</td></tr></table>
48
+
49
+ Table 1: Analysis of summaries written by our three RAs. Key-Term Coverage is percentage of bolded textbook key terms present in the summary. Average sentence length reported in tokens (space-delimited).
50
+
51
+ on SQuAD due to the clean separation between tasks afforded by T5's task-specific prefixes such as "generate question:" and "extract answer".<sup>3</sup>
52
+
53
+ The three fine-tuning tasks that were used to train the model we used are illustrated in Figure 2. For question generation, the model is trained to perform answer-aware question generation by modeling $P(Q|A,C)$ . For question answering, the model is trained to perform extractive QA by modeling $P(A|C,Q)$ . Finally, for answer extraction, instead of modeling $P(A|C)$ , the model is trained to model $P(A|C')$ with $C' = \{c_0,\dots,c_s,\dots,c_e,\dots,c_{n + 2}\}$ where $c_{s}$ and $c_{e}$ are highlight tokens that denote the start and end of the sentence within which we want to extract an answer span.
54
+
55
+ To generate questions, we iteratively highlight the start and end of each sentence in a given passage and extract at most one answer span per sentence. We then generate one question per extracted answer span using the same model in an answer-aware fashion. Passages longer than 512 tokens are split such that no sentences are divided between sub-passes and all sub-passages have a roughly equal number of sentences.
56
+
57
+ # 4 Experiments
58
+
59
+ Our first experiment evaluates the performance of the model on the original text extracted from Jurafsky and Martin (2020)'s textbook "Speech and Language Processing 3rd Edition."5 To ensure proper comparison, we manually extracted the text from our three chapters of interest (Chapters 2, 3, and 4). When extracting text, all figures, tables, and equations were omitted and all references to them were either replaced with appropriate parentheti-
60
+
61
+ cal citations or removed when possible. In total, we generated 1208 question-answer pairs from the original text.
62
+
63
+ Our second experiment evaluates the performance of the model on human-written summaries. We recruited three research assistants (RAs) as part of an undergraduate research experience to write abstractive summaries for each subsection of the same three chapters of the textbook. They were encouraged to make their summaries easily readable by humans rather than to be easily understandable by machines but otherwise no specific guidelines were given. We report some statistics about these summaries in Table 1 and include examples in Appendix E. From these three sets of summaries we generated a total of 667 question-answer pairs.
64
+
65
+ Our final experiment evaluates the performance of the model on automatically generated summaries. To perform this automatic summarization we used a BART (Lewis et al., 2020) language model which was fine-tuned for summarization on the CNN/DailyMail dataset (Nallapati et al., 2016). The same chunking procedure as described in Section 3 was performed on input passages that were larger than 512 tokens. The summarized output sub-passages were then concatenated together before running question generation. In total, we generated 318 question-answer pairs from our automatic summaries.
66
+
67
+ # 5 Evaluation
68
+
69
+ For evaluation, we randomly sampled 100 question-answer pairs from each of the three experiments to construct our evaluation set of 300 questions. We tasked the same set of RAs to evaluate the quality of the question-answer pairs. All 300 pairs were given to all three annotators. We asked the following yes/no questions:
70
+
71
+ (i). (Acceptable) Would you directly use this question as a flashcard?
72
+ (ii). (Grammatical) Is this question grammatical?
73
+ (iii). (Interpretable) Does the question make sense out of context?
74
+ (iv). (Relevant) Is this question relevant?
75
+ (v). (Correct) Is the answer correct?
76
+
77
+ We provided many example annotations to our annotators and wrote clear guidelines about each cat
78
+
79
+ ![](images/8e13111f79b3d72569ca451e6113c38b7f6059b8918346d48816b42d512ca375.jpg)
80
+ Figure 3: Results of our human evaluation for each input method. Numbers represent the proportion of questions that were labeled as having the given attribute (as determined by majority vote among our three annotators).
81
+
82
+ <table><tr><td>Source</td><td>n</td><td>Qs</td><td>As</td><td>Qs or As</td></tr><tr><td>Original Text</td><td>1209</td><td>70.9%</td><td>70.3%</td><td>88.6%</td></tr><tr><td>Auto Summary</td><td>318</td><td>44.9%</td><td>43.0%</td><td>60.1%</td></tr><tr><td>Human Summary</td><td>667</td><td>63.9%</td><td>68.4%</td><td>86.1%</td></tr></table>
83
+
84
+ egory to ensure high agreement. Our full annotator guidelines can be found in Appendix B.
85
+
86
+ In Figure 3 we report the results of our evaluation across the three sources. We note that a majority of observed errors in the original text questions stem from them being either irrelevant or uninterpretable out of context. We also see that generating questions directly from human-written summaries significantly improves relevance and interpretability, resulting in over $80\%$ being labeled as acceptable by annotators. Finally, in the case of automatic summaries, we see that relevance and interpretability are improved as compared to the original text questions while grammaticality suffers.
87
+
88
+ In Table 2 we evaluate the coverage of our generated questions. Coverage was calculated by extracting the bolded key terms from the textbook chapters and sub-string searching for each term among all questions and answers from a given source. Interestingly, if we think of the results from Figure 3 as precision scores and Table 2 as recall, we can see that human summaries have high precision high recall, original text has low precision high recall, and automatic summaries strike a balance between the two.
89
+
90
+ Table 2: Coverage of bolded key terms from the textbook. Numbers represent percentage of bolded key terms present in any of the $n$ question/answer pairs selected from the given source.
91
+
92
+ <table><tr><td></td><td>A1</td><td>A2</td><td>A3</td><td>Pairwise IAA</td></tr><tr><td>Acceptable</td><td>69.7</td><td>48.7</td><td>47.7</td><td>(0.41, 0.50, 0.33)</td></tr><tr><td>Grammatical</td><td>98.3</td><td>90.7</td><td>86.3</td><td>(0.16, 0.49, 0.10)</td></tr><tr><td>Interpretable</td><td>79.7</td><td>70.7</td><td>59.7</td><td>(0.51, 0.43, 0.32)</td></tr><tr><td>Relevant</td><td>79.0</td><td>71.3</td><td>69.0</td><td>(0.41, 0.29, 0.25)</td></tr><tr><td>Correct</td><td>91.7</td><td>90.7</td><td>90.0</td><td>(0.03, 0.08, 0.06)</td></tr></table>
93
+
94
+ Table 3: Comparison between our three annotators (A1, A2, A3) on all 300 questions across all categories. Numbers represent percentages of "Yes" answers. Pairwise Inter-Annotator Agreement is calculated by Cohen $\kappa$ and is reported in the order (A1-A2, A2-A3, A3-A1).
95
+
96
+ In Table 3 we report the pairwise inter-annotator agreement (IAA) as well as a per-annotator scoring breakdown. We use pairwise Cohen $\kappa$ instead of Fleiss $\kappa$ to better highlight the difference in agreement between certain pairs of annotators.<sup>8</sup> While at first glance it may seem that agreement is low for grammaticality and correctness, this is somewhat expected for highly unbalanced classes (Artstein and Poesio, 2008). For the other three categories (relevance, interpretability, acceptability) we see pairwise agreement of approximately 0.4, suggesting a fair degree of agreement for such seemingly ambiguous categories.
97
+
98
+ # 6 Conclusion and Future Work
99
+
100
+ In this work we show that answer-agnostic QG models have difficulty both choosing relevant topics to ask about and generating questions that are interpretable out of context. We show that asking questions on summarized text ameliorates this in large part and that these gains can be approximated by the use of automatic summarization.
101
+
102
+ Future work should seek to further explore the relationship between summarization and QG. Work done concurrently to ours by Lyu et al. (2021) already has promising results in this direction, showing that training a QG model on synthetic data from summarized text improves performance on downstream QA.
103
+
104
+ Additionally, future work should focus on further refining and standardizing the metrics used for both automatic and human evaluation of QG. As noted by Nema and Khapra (2018) n-gram overlap metrics correlate poorly with in-context interpretability and evaluation on downstream QA fails to address the relevance of generated questions.
105
+
106
+ # Acknowledgements
107
+
108
+ We graciously thank Suraj Patil for providing the fine-tuned question generation model used in this project. His training and inference code provided a great starting point for our experiments. We're very grateful for his support.
109
+
110
+ We would also like to thank Prof. Dan Jurafsky and Prof. James Martin for providing us with the raw latex files for their textbook. These files were very helpful for extraction purposes and saved us a lot of time.
111
+
112
+ Finally, we would like to thank the members of our lab for suggestions and feedback. In particular, Dan Deutsch and Alyssa Hwang were particularly influential in shaping the current version of this paper. Their great suggestions made the writing much clearer and much more understandable.
113
+
114
+ # References
115
+
116
+ Manish Agarwal and Prashanth Mannem. 2011. Automatic gap-fill question generation from text books. In Proceedings of the Sixth Workshop on Innovative Use of NLP for Building Educational Applications, pages 56-64, Portland, Oregon. Association for Computational Linguistics.
117
+ Chris Alberti, Daniel Andor, Emily Pitler, Jacob Devlin, and Michael Collins. 2019. Synthetic QA corpora generation with roundtrip consistency. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 6168-6173, Florence, Italy. Association for Computational Linguistics.
118
+ Ron Artstein and Massimo Poesio. 2008. Survey article: Inter-coder agreement for computational linguistics. Computational Linguistics, 34(4):555-596.
119
+ Seohyun Back, Akhil Kedia, Sai Chetan Chinthakindi, Haejun Lee, and Jaegul Choo. 2021. Learning to generate questions by learning to recover answer-containing sentences. In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, pages 1516-1529, Online. Association for Computational Linguistics.
120
+ Satanjeev Banerjee and Alon Lavie. 2005. METEOR: An automatic metric for MT evaluation with improved correlation with human judgments. In Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization, pages 65-72, Ann Arbor, Michigan. Association for Computational Linguistics.
121
+ Hangbo Bao, Li Dong, Furu Wei, Wenhui Wang, Nan Yang, Xiaodong Liu, Yu Wang, Songhao Piao, Jianfeng Gao, Ming Zhou, and Hsiao-Wuen Hon. 2020.
122
+
123
+ Unilmv2: Pseudo-masked language models for unified language model pre-training. In ICML.
124
+ Shaobo Cui, Xintong Bao, Xinxing Zu, Yangyang Guo, Zhongzhou Zhao, Ji Zhang, and Haiqing Chen. 2021. Onestop QAmaker: Extract question-answer pairs from text in a one-stop approach. ArXiv, abs/2102.12128.
125
+ Li Dong, Nan Yang, Wenhui Wang, Furu Wei, Xiaodong Liu, Yu Wang, Jianfeng Gao, Ming Zhou, and Hsiao-Wuen Hon. 2019. Unified language model pre-training for natural language understanding and generation. Advances in Neural Information Processing Systems, 32.
126
+ Xinya Du and Claire Cardie. 2018. Harvesting paragraph-level question-answer pairs from Wikipedia. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1907-1917, Melbourne, Australia. Association for Computational Linguistics.
127
+ Qi Guo, Chinmay Kulkarni, Aniket Kittur, Jeffrey P. Bigham, and Emma Brunskill. 2016. Questimator: Generating knowledge assessments for arbitrary topics. In Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence, IJ-CAI'16, page 3726-3732. AAAI Press.
128
+ Daniel Jurafsky and James H Martin. 2020. Speech and language processing 3rd edition draft. Prentice Hall NJ.
129
+ Ghader Kurdi, Jared Leo, Bijan Parsia, and Salam Al-Emari. 2019. A systematic review of automatic question generation for educational purposes. International Journal of Artificial Intelligence in Education, 30.
130
+ Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. BART: Denoising sequence-to-sequence pretraining for natural language generation, translation, and comprehension. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7871-7880, Online. Association for Computational Linguistics.
131
+ Chin-Yew Lin. 2004. ROUGE: A package for automatic evaluation of summaries. In Text Summarization Branches Out, pages 74-81, Barcelona, Spain. Association for Computational Linguistics.
132
+ Chenyang Lyu, Lifeng Shang, Yvette Graham, Jennifer Foster, Xin Jiang, and Qun Liu. 2021. Improving unsupervised question answering via summarization-informed question generation. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 4134-4148, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
133
+
134
+ Ramesh Nallapati, Bowen Zhou, Cicero dos Santos, Caglar Gulçehre, and Bing Xiang. 2016. Abstractive text summarization using sequence-to-sequence RNNs and beyond. In Proceedings of The 20th SIGNLL Conference on Computational Natural Language Learning, pages 280-290, Berlin, Germany. Association for Computational Linguistics.
135
+ Preksha Nema and Mitesh M. Khapra. 2018. Towards a better metric for evaluating question generation systems. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 3950-3959, Brussels, Belgium. Association for Computational Linguistics.
136
+ Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pages 311-318, Philadelphia, Pennsylvania, USA. Association for Computational Linguistics.
137
+ Weizhen Qi, Yu Yan, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang, and Ming Zhou. 2020. Prophetnet: Predicting future n-gram for sequence-to-sequence pre-training. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings, pages 2401-2410.
138
+ Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2020. Exploring the limits of transfer learning with a unified text-to-text transformer.
139
+ Pranav Rajpurkar, Robin Jia, and Percy Liang. 2018. Know what you don't know: Unanswerable questions for SQuAD. arXiv preprint arXiv:1806.03822.
140
+ Sandeep Subramanian, Tong Wang, Xingdi Yuan, Saizheng Zhang, Adam Trischler, and Yoshua Bengio. 2018. Neural models for key phrase extraction and question generation. In Proceedings of the Workshop on Machine Reading for Question Answering, pages 78-88, Melbourne, Australia. Association for Computational Linguistics.
141
+ Wilson L. Taylor. 1953. "Cloze procedure": A new tool for measuring readability. Journalism Quarterly, 30(4):415-433.
142
+ Lucy Vanderwende. 2008. The importance of being important: Question generation. In Proceedings of the 1st Workshop on the Question Generation Shared Task Evaluation Challenge, Arlington, VA.
143
+ Oriol Vinyals, Meire Fortunato, and Navdeep Jaitly. 2015. Pointer networks. In Advances in Neural Information Processing Systems, volume 28. Curran Associates, Inc.
144
+ Siyuan Wang, Zhongyu Wei, Zhihao Fan, Yang Liu, and Xuanjing Huang. 2019. A multi-agent communication framework for question-worthy phrase extraction and question generation. In AAAI.
145
+
146
+ Zichao Wang, Andrew S. Lan, Weili Nie, Andrew E. Waters, Phillip J. Grimaldi, and Richard G. Baraniuk. 2018. Qg-net: A data-driven question generation model for educational content. In Proceedings of the Fifth Annual ACM Conference on Learning at Scale, L@S '18, New York, NY, USA. Association for Computing Machinery.
147
+ Angelica Willis, Glenn M. Davis, Sherry Ruan, Lakshmi Manoharan, James A. Landay, and Emma Brunskill. 2019. Key phrase extraction for generating educational question-answer pairs. Proceedings of the Sixth (2019) ACM Conference on Learning @ Scale.
148
+ Laura Zavala and Benito Mendoza. 2018. On the use of semantic-based aig to automatically generate programming exercises. In Proceedings of the 49th ACM Technical Symposium on Computer Science Education, SIGCSE '18, page 14-19, New York, NY, USA. Association for Computing Machinery.
149
+ Shu Zhang, Dequan Zheng, Xinchen Hu, and Ming Yang. 2015. Bidirectional long short-term memory networks for relation classification. In Proceedings of the 29th Pacific Asia Conference on Language, Information and Computation, pages 73-78, Shanghai, China.
150
+
151
+ # A Software and Data
152
+
153
+ The code and data used in this project can be found in our project repository. The repository houses the 300 annotated questions, the 2,194 un-annotated questions, the text sources used (three chapters of cleaned text from Jurafsky and Martin, three sets of human summaries, one set of automatic summaries), and the code used to generate the questions. We also provide scripts to reproduce the coverage analysis as well as the analysis of our annotations.
154
+
155
+ # B Annotator Guidelines
156
+
157
+ In Table 4, we report the annotation guidelines given to our annotators. In the original document, under each category, 3 or more example annotations were given, each containing an explanation as to why the selection was made. Categories such as grammaticality had 10 or more examples given to ensure maximum agreement between annotators. Several discussion sessions were held between the authors and annotators to ensure that the guidelines were well understood.
158
+
159
+ During annotation, annotators were given the original textbook chapters to use as reference material and were allowed to use online search engines to check for grammaticality and correctness.
160
+
161
+ # Would you directly use this question as a flashcard? (Yes / No):
162
+
163
+ A Yes answer to this question means that the generated question is salient, grammatically correct, non-awkwardly phrased and has one correct answer. If you answer Yes to this question you may skip the rest of the annotation for the given example – the answers for all other questions are assumed to be Yes. If you answer No, then please continue on to the rest of the questions. Importantly, if you *did* answer yes to all of the other questions, do not feel pressured to answer yes to this question. There are many reasons why you might not want to directly use a question as a flashcard (too easy, too general, etc.) that are not enumerated here.
164
+
165
+ # Is this question grammatically correct? (Yes / No):
166
+
167
+ A Yes answer to this question implies that a question has no grammatical errors. Awkwardly worded questions that are grammatical should be annotated as such (answer Yes for these questions).
168
+
169
+ # Does this question make sense out of context? (Yes / No):
170
+
171
+ This question asks if there are any references made by the question to other items that have been "previously discussed". For our use case, questions should never refer to other specific items in the text from which they were drawn. A Yes answer to this implies that the question is interpretable when taken on its own and is a question that someone would ask if there was no pre-existing context.
172
+
173
+ # Is this question relevant? (Yes / No):
174
+
175
+ A Yes answer to this question implies that the question being asked is important for understanding the main points that the chapter (and by extension the book) is attempting to teach. Questions that are relevant should be ones that would plausibly be asked on a quiz or a test from a fairly thorough course on computational linguistics. Questions that are about insignificant details or questions that are about specific illustrated examples that are not useful for understanding the main points of the chapter should be given a No. Anything that is relevant (or tangentially relevant) to computational linguistics should be given a Yes.
176
+
177
+ # Is the answer to the question correct? (Yes / No):
178
+
179
+ A Yes answer to this question implies that the answer given is one of a multitude of plausible correct answers to the question. If the question has multiple correct answers and the given answer is one of them, it should be annotated as a Yes. If the question is bad/ungrammatical or underspecified to such an extent that you cannot judge the answer properly, you should annotate Yes. However, irrelevant questions that are grammatical and reasonably interpretable should be annotated properly.
180
+
181
+ Table 4: Guidelines given to our human annotators before annotating for the acceptability, grammaticality, interpretability, relevance, and correctness of generated questions.
182
+
183
+ <table><tr><td># Questions</td><td>Chapter 2 (n = 139)</td><td>Chapter 3 (n = 93)</td><td>Chapter 4 (n = 66)</td></tr><tr><td>Acceptable</td><td>54.0%</td><td>58.1%</td><td>53.0%</td></tr><tr><td>Grammatical</td><td>94.2%</td><td>93.5%</td><td>93.9%</td></tr><tr><td>Interpretable</td><td>74.1%</td><td>76.3%</td><td>72.7%</td></tr><tr><td>Relevant</td><td>72.7%</td><td>81.7%</td><td>83.3%</td></tr><tr><td>Correct</td><td>95.0%</td><td>100%</td><td>98.5%</td></tr></table>
184
+
185
+ Table 5: Distribution of human evaluation scores across the three chapters of annotation. Labels are determined via majority vote among our three annotators.
186
+
187
+ # C Comparison Across Chapters
188
+
189
+ In Table 5 we report the distribution of scores across chapters. We note that scores are largely consistent across the three chapters, with lower average relevance for Chapter 2 questions possibly owing to the source material containing many worked examples of regular expressions.
190
+
191
+ # D Example Disagreements
192
+
193
+ In Table 6, we list questions for which there was at least one dissenting annotator. We see that for categories such as "Relevant" and "Interpretable", annotations are often dependent on the level of granularity with which the topic is being discussed.
194
+
195
+ For example, a question such as "Who named the minimum edit distance algorithm?" may or may not be relevant depending on how granular of a class the student is taking.
196
+
197
+ For categories such as "Correct" or "Acceptable", certain particularities about otherwise good questions can easily disqualify them from receiving a positive annotation. In the case of "What NLP algorithms require algorithms for word segmentation?", keen-eyed annotators would notice that the question is nonsensical; however, others may note that both Japanese and Thai do, in fact, require word segmentation. Particularities such as these make this task very difficult, even for expert annotators.
198
+
199
+ # E Example Summaries
200
+
201
+ In Table 7 we list two examples of textbook sections with their accompanying human and automatic summaries. We see that the length of summaries varies drastically between our annotators, each of them making different decisions on whether to keep or discard certain pieces of information. We also note that automatic summaries are much more extractive in nature while human summaries are generally more abstractive.
202
+
203
+ <table><tr><td>Acceptable</td><td>Q: What is another name for a corpus that NLP algorithms learn from? A: training corpus
204
+ Q: What would happen if we accidentally trained the model on the test set? A: bias
205
+ Q: What would give a lower cross-entropy? A: The more accurate model</td></tr><tr><td>Grammatical</td><td>Q: What are words like uh and um called fillers? A: filled pauses
206
+ Q: What context do words that are in our vocabulary appear in a test set in? A: unseen
207
+ Q: What word has the same lemma cat but are different wordforms? A: cats</td></tr><tr><td>Interpretable</td><td>Q: What gives us a way to quantify both of these intuitions about string similarity? A: Edit distance
208
+ Q: What is another important step in text processing? A: Sentence segmentation
209
+ Q: What seems to matter more than its frequency? A: whether a word occurs or not</td></tr><tr><td>Relevant</td><td>Q: What isn’t big enough to give us good estimates in most cases? A: web
210
+ Q: Who named the minimum edit distance algorithm? A: Wagner and Fischer
211
+ Q: What do algorithms have to deal with? A: ambiguities</td></tr><tr><td>Correct</td><td>Q: What do square brackets not allow us to say? A: s or nothing
212
+ Q: What NLP algorithms require algorithms for word segmentation? A: Japanese and Thai
213
+ Q: What encode some facts that we think of as strictly syntactic in nature? A: Bigram probabilities</td></tr></table>
214
+
215
+ Table 6: Questions for which there was disagreement on the label for the given category
216
+
217
+ <table><tr><td>Original Text: What do we do with words that are in our vocabulary (they are not unknown words) but appear in a test set in an unseen context (for example they appear after a word they never appeared after in training)? To keep a language model from assigning zero probability to these unseen events, we&#x27;ll have to shave off a bit of probability mass from some more frequent events and give it to the events we&#x27;ve never seen. This modification is called smoothing or discounting. In this section and the following ones we&#x27;ll introduce a variety of ways to do smoothing: Laplace (add-one) smoothing, add-k smoothing, stupid backoff, and Kneser-Ney smoothing.</td><td>Original Text: As we saw in the previous section, naive Bayes classifiers can use any sort of feature: dictionaries, URLs, email addresses, network features, phrases, and so on. But if, as in the previous section, we use only individual word features, and we use all of the words in the text (not a subset), then naive Bayes has an important similarity to language modeling. Specifically, a naive Bayes model can be viewed as a set of class-specific unigram language models, in which the model for each class instantiates a unigram language model. Since the likelihood features from the naive Bayes model assign a probability to each word P(wordlc), the model also assigns a probability to each sentence.</td></tr><tr><td>Automatic Summary: What do we do with words that are in our vocabulary (they are not unknown words) but appear in a test set in an unseen context? To keep a language model from assigning zero probability to these unseen events, we&#x27;ll have to shave off a bit of probability mass from some more frequent events. This modification is called smoothing or discounting.</td><td>Automatic Summary: A naive Bayes Bayes model can be viewed as a set of class-specific unigram language models. The model for each class instantiates a language model. 
Since the likelihood features assign a probability to each word P(wordlc), the model also assigns a probability to each sentence.</td></tr><tr><td>Human Summary (A1): We remove some probability mass for more frequent events and reassign it to unseen events with known words, and this is called smoothing or discounting. We study four 4 main methods of smoothing: Laplace smoothing, add-k smoothing, stupid backoff, and Kneser-Ney smoothing.</td><td>Human Summary (A1): A naive Bayes model can be viewed as a set of class-specific unigram language models.</td></tr><tr><td>Human Summary (A2): Smoothing or discounting is the procedure of transferring the probability mass of frequent events to other words that appear in the test set in an unseen context.</td><td>Human Summary (A2): Naive Bayes models are similar to language modeling in that they can be viewed as a set of class-specific unigram language models. The probability of a sentence being positive is the total product of the individual probabilities that each word in the sentence is positive.</td></tr><tr><td>Human Summary (A3): Not assigning zero to the probability of an unseen word in the test set is called smoothing or discounting. There are different ways to do smoothing: Laplace, add-k smoothing, stupid backoff, Kneser-Ney smoothing.</td><td>Human Summary (A3): A naive Bayes model can be viewed as a set of class-specific unigram language models, in which the model for each class instantiates a unigram language model.</td></tr></table>
218
+
219
+ Table 7: Examples of human and automatic summaries for two sections of "Speech and Language Processing". The left text is from Section 3.4 "Smoothing" and the right text is from Section 4.6 "Naive Bayes as a Language Model". We see that the automatic summaries tend to be more extractive while the human summaries are more abstractive.
afeasibilitystudyofansweragnosticquestiongenerationforeducation/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1fc264f380bc0a05c24c79ca34c0be2b3e6f6f833b5230f606815bd566f4effe
3
+ size 623761
afeasibilitystudyofansweragnosticquestiongenerationforeducation/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d72287ee93c9743e7370c9535f5187d427b32105f7025785e32c413a82eb757
3
+ size 245054
afewshotsemanticparserforwizardofozdialogueswiththeprecisethingtalkrepresentation/a24cb7b8-f045-4e39-a5ee-ff571c730d3e_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e7ba22c7630e38d7c97ddd9e81712182f5a387fde0f1a1b8aa17543233d3a9c9
3
+ size 109847
afewshotsemanticparserforwizardofozdialogueswiththeprecisethingtalkrepresentation/a24cb7b8-f045-4e39-a5ee-ff571c730d3e_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:472d713f40f62a8b96ca4e06c4ae233893bde053a23ad18b7145286f7a2cc20a
3
+ size 131700
afewshotsemanticparserforwizardofozdialogueswiththeprecisethingtalkrepresentation/a24cb7b8-f045-4e39-a5ee-ff571c730d3e_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2e6c6d5a0c65913a20cf2b31add32417bc43cd3bbd5e50a409caaaaeb887db81
3
+ size 386553
afewshotsemanticparserforwizardofozdialogueswiththeprecisethingtalkrepresentation/full.md ADDED
@@ -0,0 +1,526 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # A Few-Shot Semantic Parser for Wizard-of-Oz Dialogues with the Precise ThingTalk Representation
2
+
3
+ Giovanni Campagna Sina J. Semnani Ryan Kearns Lucas Jun Koba Sato Silei Xu Monica S. Lam
4
+
5
+ Computer Science Department
6
+
7
+ Stanford University
8
+
9
+ Stanford, CA, USA
10
+
11
+ {gcampaign,sinaj,kearns,satojk,silei,lam}@cs.stanford.edu
12
+
13
+ # Abstract
14
+
15
+ Previous attempts to build effective semantic parsers for Wizard-of-Oz (WOZ) conversations suffer from the difficulty in acquiring a high-quality, manually annotated training set. Approaches based only on dialogue synthesis are insufficient, as dialogues generated from state-machine based models are poor approximations of real-life conversations. Furthermore, previously proposed dialogue state representations are ambiguous and lack the precision necessary for building an effective agent.
16
+
17
+ This paper proposes a new dialogue representation and a sample-efficient methodology that can predict precise dialogue states in Woz conversations. We extended the ThingTalk representation to capture all information an agent needs to respond properly. Our training strategy is sample-efficient: we combine (1) few-shot data sparsely sampling the full dialogue space and (2) synthesized data covering a subset space of dialogues generated by a succinct state-based dialogue model. The completeness of the extended ThingTalk language is demonstrated with a fully operational agent, which is also used in training data synthesis.
18
+
19
+ We demonstrate the effectiveness of our methodology on MultiWOZ 3.0, a reannotation of the MultiWOZ 2.1 dataset in ThingTalk. ThingTalk can represent $98\%$ of the test turns, while the simulator can emulate $85\%$ of the validation set. We train a contextual semantic parser using our strategy, and obtain $79\%$ turn-by-turn exact match accuracy on the reannotated test set. $^{1}$
20
+
21
+ # 1 Introduction
22
+
23
+ Virtual assistants and task-oriented dialogue agents are transforming how consumers interact with computers. This has led to active research on dialogue state tracking networks (Ren et al., 2019; Zhou and
24
+
25
+ ![](images/40a9f54bfc7d6e7eea543764298b6fe4e73448d044039a56fea0c17826739070.jpg)
26
+ Figure 1: The inference-time flow of a dialogue agent with a contextual semantic parser based on the ThingTalk representation.
27
+
28
+ Small, 2019; Zhang et al., 2020; Chen et al., 2020; Heck et al., 2020), and even full neural networks that track dialogue states, implement dialogue policies, and generate agent utterances (Williams and Zweig, 2016; Eric et al., 2017; Zhang et al., 2020; Peng et al., 2020; Hosseini-Asl et al., 2020).
29
+
30
+ Dialogue state tracking on Wizard-of-Oz task-oriented conversations, where humans are asked to simulate both the agent and the user, has proven to be challenging. For example, despite multiple rounds of manual annotation, the MultiWOZ multi-domain task-oriented dataset still contains significant errors which hamper the development of accurate semantic parsers (Zang et al., 2020; Han et al., 2020; Ye et al., 2021a). An approach to bypass manual annotations is to generate dialogues using a simulator and then manually paraphrase them (Shah et al., 2018). Unfortunately, as we shall show in this paper, such dialogue simulators do not exercise many of the possible dialogue flows seen in Wizard-of-Oz conversations. This gap is likely to widen with real-life conversations.
31
+
32
+ Given the many attempts to create accurate semantic parsers for the MultiWOZ data set, this
33
+
34
+ paper takes a fresh look at the problem of understanding Wizard-of-Oz conversations. We observe two fundamental flaws with the current approach. Previously proposed state representations such as slot-value pairs and the recently proposed hierarchical forms (Cheng et al., 2020) do not capture critical details in the user utterances, such as logical "or" and negation. Even if the semantic parser is $100\%$ accurate, the agent will not be able to satisfy the user's request. Second, it is easy to make errors. The existing slot representation is ambiguous, so it is not possible to be consistently correct. This leads to poor quality of annotation.
35
+
36
+ This paper shows that it is possible to create a precise and accurate semantic parser for Wizard-of-Oz conversations in a sample-efficient manner. We introduce the MultiWOZ 3.0 dataset, a reannotation of the full test set and partial validation set of MultiWOZ 2.1 (Eric et al., 2019), using a new, more precise formal representation. The contributions of this paper include:
37
+
38
+ 1. A precise, complete, executable ThingTalk representation for dialogues. In previous work, we proposed the ThingTalk programming language to represent just a single utterance (Campagna et al., 2019). Here we extend it to a full formal representation of a dialogue, including multiple turns of user input, results from the user request (such as a database lookup or API invocation), and the agent's response. We show that the extended ThingTalk for dialogues is precise enough to capture $98\%$ of the turns in MultiWOZ 3.0. In the rest of the paper, we will refer to the extended ThingTalk language as ThingTalk, unless noted otherwise.
39
+
40
+ We also demonstrate that ThingTalk is a complete representation for dialogues. The agent directly executes the ThingTalk representation to retrieve the results from the databases and APIs, without referring to any of the user utterances. In fact, the same agent code can be used both during simulation and in a real agent deployment.
41
+
42
+ 2. We show that we can obtain a high-quality synthetic training data set with a simulator that adopts the ThingTalk representation. The precision of ThingTalk makes it possible to generate many distinctively different dialogue paths that mirror those in the WOZ conversation. Our experiment shows that our simulator can generate $85\%$ of the user turns.
43
+ 3. We show that by leveraging synthesized dialogues represented in ThingTalk, we can train
44
+
45
+ an effective semantic parser for WOZ conversations. This is significant since it is difficult to annotate dialogues accurately. ThingTalk does not make it easier to annotate, but it is unambiguous. We annotate manually only a few-shot training set, and rely on synthesis for the rest. The few-shot training data is $2\%$ of the typical amount of annotated data.
46
+
47
+ The few-shot training samples in ThingTalk help the semantic parser generalize from the simulated dialogues to WOZ conversations. Whereas the simulator can only generate a subset of the states representable by ThingTalk, ThingTalk can precisely represent nearly all WOZ data.
48
+
49
+ Our novel contextual semantic parser, described in Section 5, obtains a turn-by-turn accuracy of $79\%$ on MultiWOZ 3.0. Note that this model generalizes to utterances that fall out of the realm of simulation.
50
+
51
+ # 2 Related Work
52
+
53
+ State Representation for DST Dialogue State Tracking is the task of predicting a formal representation of a conversation. The standard representation used in DST contains the values of all slots mentioned in the dialogue (El Asri et al., 2017; Budzianowski et al., 2018). This is inadequate in practice. First of all, the definition is ambiguous, as it could mean "all slots mentioned by the user" or "all slots mentioned by either the user or the agent". This has led to inconsistency in the annotation. Second, the representation does not track the comparison or logical operators in the request, so it cannot model complex queries.
54
+
55
+ Recently, Cheng et al. (2020) proposed adopting a formal representation for both the user and agent state, using the TreeDST representation. TreeDST was built to support only dialogues synthesized and paraphrased from a compatible state machine, while ThingTalk supports the full generality of Wizard-of-Oz conversations.
56
+
57
+ Data Acquisition for DST In recent years, a number of very large DST datasets have been released (Budzianowski et al., 2018; Byrne et al., 2019; Rastogi et al., 2020). The preferred technique to acquire such datasets is through Wizard-of-Oz (Kelley, 1984), a technique in which two humans are instructed to converse with each other, with one person taking the role of the agent. WOZ datasets are expensive, and the annotation quality is poor. A different approach synthesizes a large
58
+
59
+ corpus of dialogues using a state machine, then employs crowdworkers to paraphrase them. Paraphrasing has been applied to semantic parsing (Wang et al., 2015) and dialogues (Shah et al., 2018; Rastogi et al., 2020; Cheng et al., 2020). Paraphrased datasets have less variety than WOZ, and crowdsourced paraphrases are also expensive. Our approach has a significant cost advantage, while matching the variety of WOZ dialogues.
60
+
61
+ Campagna et al. (2020) found that using data synthesized from a small finite state machine, it is possible to increase the accuracy of DST in the transfer learning setting. Later, Yu et al. (2021) proposed using synthesized data to pre-train a DST model, using a different objective function. They showed modest improvements in MultiWOZ 2.1, using the full training set. We instead propose using the same fine-tuning objective for both synthesized and few-shot annotated data.
62
+
63
+ # 3 The ThingTalk Dialogue Language
64
+
65
+ The ThingTalk Dialogue Language is designed to formally capture all relevant information in task-oriented dialogues to interpret what the user says next. This includes the user utterances, the result of the user requests, as well as the agent's replies.
66
+
67
+ To see why the results and the agent's reply are needed, consider the example in Fig. 1. The user has previously asked for a cheap restaurant, and now asks "Do you have anything with Indian food?" In the example, the agent noted that there are many cheap restaurants available, so it is likely that the user wants both "Indian" and "cheap". This is reflected in the query that the command maps to. Conversely, had the agent responded that there are no cheap restaurants, it is likely that the user no longer cares about finding a cheap restaurant and only wants Indian food. The user query thus would be just:
68
+
69
+ Exec:Restaurant,food $=$ "indian"
70
+
71
+ This illustrates that the meaning of the user utterance depends on the result and the agent's response, so we must include them in the formal dialogue. The previous slot-based representation captures only what is mentioned by the user; it is not precise enough to handle this example.
72
+
73
+ Formally, ThingTalk represents (1) the user state $u \in U$ with the semantics of a single user turn, (2) the agent state $a \in A$ with the semantics of the single agent turn, and (3) the formal dialogue $d \in \mathcal{D}$ to capture all information necessary to interpret the user utterance. In this section, we provide the
74
+
75
+ (a) Sorting and ranking in ThingTalk
76
+
77
+ Agent: There are 14 trains that arrive by 12:45. What time would you like to leave?
78
+
79
+ User: What's the latest train i can take that will still get me there by 12:45?
80
+
81
+ $u_{1} =$ Exec: sort(arrive_by desc of Train, arrive_by $\leq 12{:}45$ && $\ldots$)[1]
82
+
83
+ (b) Projection and logical operators in ThingTalk
84
+
85
+ User: I think i would like to visit both churichill and magdalene colleges. May I have their phone numbers?
86
+
87
+ $u_{1} = \mathrm{Exec}$: [phone] of Attraction, name $=$ "churchill" || name $=$ "magdalene"
88
+
89
+ Figure 2: ThingTalk representations of user utterance examples in the MultiWOZ 3.0 validation set. $u_{1}$ denotes the user state.
90
+
91
+ Agent: [...] Would you like me to make you a reservation?
92
+
93
+ User: Yes, please make a reservation.
94
+
95
+ $u_{1} =$ Exec:Restaurant.MakeReservation(name $=$ "...")
96
+
97
+ Agent: What day and time?
98
+
99
+ $a_1 =$ SlotFill: book_day, book_time Restaurant.MakeReservation(name $=$ "...")
100
+
101
+ (a) User answers the question
102
+
103
+ User: At 17:30 on Friday.
104
+
105
+ $u_{2} =$ Exec: Restaurant.MakeReservation(name $=$ "...", book_time $=$ 17:30, book_day $=$ friday);
106
+
107
+ (b) Or, user switches to a new domain instead
108
+
109
+ User: Nevermind. Not at this time. Can you help me find the postcode for the Holiday Inn Cambridge?
110
+
111
+ $u_{2} =$ Exec: Hotel, name $=$ "holiday inn cambridge";
112
+
113
+ Figure 3: Examples of a user continuing or abandoning a transaction, adapted from the MultiWOZ 3.0 validation set. The user state $u_{2}$ denotes this fact by propagating or discarding the action. $a_{1}$ is the agent state.
114
+
115
+ detailed definition of each component. The formal syntax is included in Appendix A.
116
+
117
+ User State. The formal semantics of a user turn is represented by a user state $u \in U$ , which consists of an abstract dialogue act and, for dialogue acts that provide or request information, a sequence of statements: either database queries, or actions with side effects (such as making a reservation). Queries specify the domain of interest and can use the standard relational operators: selection, projection, aggregation, sorting. Actions specify the domain, the action name, and the parameters necessary for the action. User state examples in Figures 1 and 2 with abstract act "Exec" are all queries, while the example in Fig. 3 uses the action "Restaurant.MakeReservation".
118
+
119
+ The user state includes new statements that are implied by the current utterance and statements that the user has previously mentioned and is still interested in pursuing (Fig. 3). Note that a single user utterance may map to multiple ThingTalk statements, possibly in different domains.
120
+
121
+ <table><tr><td>Feature</td><td>Slots</td><td>TreeDST</td><td>Express</td><td>TT</td></tr><tr><td>User</td><td></td><td></td><td></td><td></td></tr><tr><td>Executable Semantics</td><td>×</td><td>×</td><td>✓</td><td>✓</td></tr><tr><td>Canonicalizable</td><td>×</td><td>×</td><td>×</td><td>✓</td></tr><tr><td>Greetings</td><td>×</td><td>×</td><td>?</td><td>✓</td></tr><tr><td>Learn More, Ask Recomm.</td><td>×</td><td>×</td><td>?</td><td>✓</td></tr><tr><td>Multi-domain Turns</td><td>×</td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>Request Features:</td><td></td><td></td><td></td><td></td></tr><tr><td>Slot Constraints</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>Comparisons</td><td>×</td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>Logical And</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>Logical Or, Not</td><td>×</td><td>×</td><td>✓</td><td>✓</td></tr><tr><td>Projection</td><td>×</td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>Ranking</td><td>×</td><td>×</td><td>?</td><td>✓</td></tr><tr><td>Agent</td><td></td><td></td><td></td><td></td></tr><tr><td>Dialogue Acts</td><td>×</td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>Requested Slots</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>Proposed Slots</td><td>×</td><td>✓</td><td>✓</td><td>✓</td></tr></table>
122
+
123
+ Table 1: Comparison of representation power for different lexical features of different formal dialogue languages. TreeDST refers to Cheng et al. (2020), Express refers to Andreas et al. (2020). TT indicates ThingTalk.
124
+
125
+ Agent State. Analogously, each agent turn has a formal agent state $a \in A$ representation, which is computed by the agent policy. The agent state includes an abstract dialogue act, as well as an optional agent statement, which either requests some slots from the user, proposes a new statement to the user, or asks the user to confirm an action.
126
+
127
+ Formal Dialogue Representation. A formal dialogue $d \in \mathcal{D}$ captures all the information in the conversation needed to interpret the user utterance. Specifically, it contains the current agent state, the accumulated results of executing the user statements in previous turns, and the user statements that the user has asked to execute but that are missing some required parameters. The results for queries are the items retrieved from the database; the results for actions are returned by the API call.
128
+
129
+ Comparison with previous representations In Table 1 we compare ThingTalk with three existing state representations: the slots and values representation used in MultiWOZ, the TreeDST representation (Cheng et al., 2020), and the Express representation (Andreas et al., 2020; Tellman, 2021). Note that neither Express nor TreeDST is open-source or available to use, whereas ThingTalk is fully open-source and comes with tools that developers can use. Limited documentation exists for Express, so we use “?” for features whose support status we do not know.
130
+
131
+ ThingTalk represents user queries and commands as executable database queries and API calls.
132
+
133
+ An executable representation is easier to annotate manually. Other approaches require annotators to be familiar with the semantics of each domain, whereas in our approach annotators just need to learn the database query syntax to annotate for different domains. Additionally, the implementation of the agent only needs to execute ThingTalk statements; no custom per-domain logic is necessary.
134
+
135
+ Furthermore, ThingTalk is canonicalizable: the annotation of the semantics of a turn is syntactically unique, regardless of how the turn is phrased, and the unique form can be computed automatically. This is important both to enforce conventions on manually annotated data, as well as to be able to paraphrase: if the annotation depends on the syntactic form of the utterance, the annotation must be changed after paraphrasing. Express, while executable, is not canonicalizable because it represents coreferences explicitly and expresses updates to the dialogue state as edits. Both features lead to syntactically different representations for the same semantics, for example if the coreference is by name, by constraints, or by pronoun.
136
+
137
+ ThingTalk can represent the full generality of WoZ conversations. For example, ThingTalk can represent turns that have no request, at the beginning and end of the conversation. Neither slots nor TreeDST have a representation for those turns. This oversight highlights the need to design the representation based on real conversations.
138
+
139
+ One feature present in the previous representation that we drop from ThingTalk is the precise slots mentioned by the agent. For example, in response to a user asking for a restaurant, the agent may mention the restaurant "name" and "address." Such slots do not affect the interpretation of the user utterance. Removing them from the agent state coalesces many more utterances into the same state, and makes it possible to approximate more complex human agent utterances, increasing the state coverage and boosting the accuracy of the semantic parser.
140
+
141
+ # 4 Simulator-Agent Architecture
142
+
143
+ To synthesize data for training, we propose a simulator-agent architecture. The state-based simulator takes the role of the human user. The same agent that would be used at deployment time is used during synthesis. The agent is built based on the semantics of ThingTalk, not just the simulator. It can respond correctly to any dialogue $d \in \mathcal{D}$ representable in ThingTalk. On the other hand, the
144
+
145
+ simulator samples a subset space $\mathcal{D}_{\mathrm{Sim}}\subset \mathcal{D}$ .We refer to dialogues in $\mathcal{D}_{\mathrm{Sim}}$ as in-simulation; other dialogues are out-of-simulation.
146
+
147
+ Formally, the architecture has three components:
148
+
149
+ $\operatorname{Agent}(d, u): \mathcal{D} \times U \to \mathcal{D}$ : an agent that accepts a formal dialogue $d \in \mathcal{D}$ , and the user state $u \in U$ representing the last user utterance, to produce a new dialogue $d' \in \mathcal{D}$ . The agent guarantees that if $d \in \mathcal{D}_{\mathrm{Sim}}$ then $d' \in \mathcal{D}_{\mathrm{Sim}}$ .
150
+
151
+ $\operatorname{Sim}(d): \mathcal{D}_{\operatorname{Sim}} \to X \times U$ : a simulator that accepts an in-simulation dialogue $d \in \mathcal{D}_{\operatorname{Sim}}$ , and non-deterministically creates a new user utterance $x \in X$ and its user state $u \in U$ .
152
+
153
+ $\operatorname{CSP}(d, x): \mathcal{D} \times X \to U$ , a contextual semantic parsing model that accepts a dialogue $d \in \mathcal{D}$ which may not be in $\mathcal{D}_{\mathrm{Sim}}$ , and a user utterance $x \in X$ to predict the user state in $U$ .
154
+
155
+ In this section, we describe how the components are used to synthesize training data and build a functional dialogue agent.
156
+
157
+ # 4.1 Training Data Synthesis
158
+
159
+ We synthesize training data for CSP as follows:
160
+
161
+ $\operatorname{Syn}(d): \mathcal{D}_{\mathrm{Sim}} \to \mathcal{D}_{\mathrm{Sim}} \times X \times U$ : the synthesizer accepts a dialogue $d \in \mathcal{D}_{\mathrm{Sim}}$ and returns a training sample produced by using Sim to generate a possible user utterance and a resulting in-simulation dialogue to be predicted, then applying the Agent to continue:
162
+
163
+ $$
164
+ \operatorname{Syn}(d) = \left(d^{\prime}, x, u\right), \text{ where}
165
+ $$
166
+
167
+ $$
168
+ (x, u) = \operatorname{Sim}(d), \quad d^{\prime} = \operatorname{Agent}(d, u)
169
+ $$
170
+
171
+ Starting with a null dialogue, we iteratively use Syn to synthesize training samples. During synthesis, the agent is called in a mock execution environment with no side effects, and it uses a non-deterministic policy that generates many possible agent behaviors. It is helpful to include many agent behaviors because it helps model the human WOZ agent.
172
+
173
+ Following Campagna et al. (2020), both the simulator and the agent policy are implemented using a domain-independent state machine which includes many natural language templates for user and agent utterances. Using the templates and a few natural language phrases for each slot, we can generate dialogues for any new domain with minimal effort.
174
+
175
+ User: Please book a table for 5 at 14:30 on wednesday at Royal Spice. I also need to find a place to stay.
176
+
177
+ $$
178
+ \begin{array}{l} u_1 = \text{Exec}: \text{Restaurant.MakeReservation(} \\ \quad \text{name} = \text{"royal spice"}, \text{book\_people} = 5, \\ \quad \text{book\_time} = 14{:}30, \text{book\_day} = \text{wednesday)}; \\ \quad \text{Hotel}; \end{array}
179
+ $$
180
+
181
+ Agent: I was able to book your table successfully. Your reference number is kqmxil0z. Now, what type of accommodations are you looking for today?
182
+
183
+ Figure 4: Example of an out-of-simulation dialogue from the MultiWOZ 3.0 test set, where the same turn mentions two domains. The simulator never generates such a turn but the agent can reply to it.
184
+
185
+ # 4.2 Deployment
186
+
187
+ After training, the same agent can be used at deploy time to reply to the real user.
188
+
189
+ $\operatorname{Deploy}(d, x): \mathcal{D} \times X \to \mathcal{D}$ : given the current dialogue, a deployable system uses CSP to map the next user utterance to a formal dialogue, which is then used by Agent to continue the dialogue. Let $d_0$ be the empty dialogue and let $x_1, x_2, \ldots$ be the user inputs:
190
+
191
+ $$
192
+ \begin{array}{l} d_i = \operatorname{Deploy}\left(d_{i-1}, x_i\right) \\ \quad = \operatorname{Agent}\left(d_{i-1}, \operatorname{CSP}\left(d_{i-1}, x_i\right)\right) \end{array}
193
+ $$
194
+
195
+ # 4.3 Out-of-simulation Dialogues
196
+
197
+ While the simulator can cover only the most common dialogue paths, ThingTalk is designed to be general, covering many more possible dialogues. To improve generality, the CSP is trained not only with simulated dialogues but also few-shot data annotated with the full expressiveness of ThingTalk. Correspondingly, the agent is written to handle the full representation of ThingTalk. This design makes our parser and agent more robust than those that only train with simulated dialogues. Fig. 4 shows an out-of-simulation dialogue from the MultiWOZ test set. In the example, the agent must reply to two domains at once.
198
+
199
+ We show below some of the out-of-simulation dialogue patterns handled by our agent.
200
+
201
+ - Domain switch: the user switches to a new domain in the middle of a discussion about another; the simulator switches domains only after completing the action.
202
+ - Multidomain: the user refers to two domains in the same utterance; the simulator only refers to one domain at a time.
203
+ - Eager action parameters: the user specifies parameters for an action before completing the query, ignoring a prompt from the agent to refine the query.
204
+
205
+ - Abandoning transactions: the user abandons a transaction after it has been initiated; the simulator never interrupts a transaction.
206
+
207
+ These examples illustrate the many plausible ways in which the user can change the course of a dialogue. Trying to simulate all these possibilities is infeasible, nor is it desirable, as it will worsen the distribution of the training data by overemphasizing uncommon patterns. At the same time, handling these cases is important; thus, we train with few-shot annotated data and rely on the model's inherent generalization capability.
208
+
209
+ # 5 Contextual Semantic Parsing Model
210
+
211
+ # 5.1 Model Architecture
212
+
213
+ Our CSP neural model is fine-tuned from the pre-trained BART model (Lewis et al., 2020). BART is a Transformer encoder-decoder neural network (Vaswani et al., 2017) pre-trained with the task of reconstructing noised inputs. Our model for the user encodes a concatenation of the formal dialogue and the user utterance, and is trained to generate the user state as its output.
214
+
215
+ To reduce the length of the input, the formal dialogue is truncated before feeding to the model: only the last executed query and action in each domain are kept, and the rest is discarded. Previous statements are no longer relevant; information that is still relevant is carried over in the last statement. Additionally, we encode at most one result per query. We observe that the user uses either a coreference to refer to the only/first choice, or uses the entity name. The model is trained to copy entity names from the user utterance.
216
+
217
+ We use BART-Large, with about 400M parameters. We train it with token-level cross-entropy loss and teacher forcing. Hyperparameters and preprocessing details are included in Appendix B.
218
+
219
+ # 5.2 Training Data
220
+
221
+ Data Synthesis. We use Syn to synthesize an initial set of training dialogues, covering all possible combinations of slots at each turn, and many possible paths in $\mathcal{D}_{\mathrm{Sim}}$ .
222
+
223
+ Automatic Paraphrasing. We apply automatic paraphrasing with filtering (Xu et al., 2020) to increase the variety of natural language in each turn. We use a pre-trained BART model fine-tuned on the ParaBank2 general-purpose paraphrasing dataset (Hu et al., 2019). Each user utterance is
224
+
225
+ paraphrased individually. We apply filtering to ensure that the user state does not change for each utterance: each paraphrased utterance, with its associated formal dialogue, is passed to a model trained on synthesized data; the utterance is discarded if the model predicts a different user state than the annotation before paraphrasing.
226
+
227
+ Few-Shot Fine-Tuning. To expose the model to the variety in real-world data, we fine-tune the model with a small number of manually annotated dialogues.
228
+
229
+ Self-Training. Acquiring large fully-annotated WOZ datasets is challenging, because annotations are often erroneous. Acquiring unannotated WOZ datasets, on the other hand, is easier. To use such data when available, we propose using self-training (McClosky et al., 2006; Einolghozati et al., 2019; Zoph et al., 2020). We apply the model fine-tuned on few-shot data to unannotated input, create a training set using the predicted result as annotations, and use that to further fine-tune the model.
230
+
231
+ The annotation of WOZ dialogues requires predictions of the agent state as well, unlike the simulated dialogues where the agent state is generated automatically. We apply the same methodology as for the user states to the agent state, so as to annotate the full dialogues for training.
232
+
233
+ # 6 Evaluation
234
+
235
+ Our evaluation attempts to answer these research questions:
236
+
237
+ 1. How well does our ThingTalk representation model Wizard-of-Oz conversations?
238
+ 2. What accuracy can a model achieve in the task of predicting ThingTalk, given our training data acquisition strategy?
239
+ 3. How well do our dialogue simulator and our dialogue agent approximate real dialogues?
240
+
241
+ # 6.1 Experimental Setting
242
+
243
+ We conduct our experiments using the MultiWOZ dataset (Budzianowski et al., 2018; Eric et al., 2019). This dataset includes English task-oriented dialogues across five domains: Attraction, Hotel, Restaurant, Taxi, and Train.
244
+
245
+ We reannotated parts of MultiWOZ 2.1 with ThingTalk annotations, and we name this version MultiWOZ 3.0. The authors of this paper reannotated the full test set and, due to a lack of time, $36\%$ of the validation set, discarding the rest. Our
246
+
247
+ result is thus a lower-bound on the possible accuracy: with more of the validation set annotated, we expect higher test accuracy.
248
+
249
+ The slot values in our new test set differ from the original annotations in $83\%$ of the turns. This is not surprising because others have already found problems in MultiWOZ 2.1 (Zhou and Small, 2019; Zang et al., 2020; Han et al., 2020), and because ThingTalk and the existing annotations adopt different conventions for when a slot should be included. We found mistakes in the annotations, inconsistent normalization of names, and inconsistent annotation of slots offered by the agent. We dropped $1\%$ of test turns due to unrecoverable human errors, such as the user acting as the agent.
250
+
251
+ We use four datasets for training:
252
+
253
+ - Synthesized dataset, generated using our state-machine-based simulator and agent, consisting of around 1M dialogues across all five domains. The state machine has 20 abstract transitions for the agent, and 43 for the user.
254
+ - Paraphrase dataset, obtained by automatically paraphrasing the synthesized data.
255
+ - Few-Shot dataset, a split of 168 dialogues from the original validation set. This amounts to $2\%$ of the original training set. Another 265 dialogues in the original validation set are used as the 3.0 validation set.
256
+ - Self-Trained dataset, obtained by self-training on the MultiWOZ training set.
257
+
258
+ Dataset statistics are detailed in Appendix C.
259
+
260
+ We use the Genie Toolkit (Campagna et al., 2019) for data synthesis and Hugging Face's Transformers library (Wolf et al., 2020) for the model.
261
+
262
+ # 6.2 Precision of ThingTalk
263
+
264
+ ThingTalk is designed to precisely cover the semantics of Wizard-of-Oz dialogues. We first observe that ThingTalk captures the semantics of the sentences well: it can represent the validation set in its entirety, and $99.8\%$ of the user utterances and $97.6\%$ of the agent utterances in the test set are representable. Overall, that comprises $97.7\%$ of the test turns. ThingTalk cannot represent, for example, out-of-domain questions, questions that cannot be answered using the given database, and agent utterances such as asking the users to wait.
265
+
266
+ User utterances in the test set that cannot be represented are simply counted as errors, while agent utterances that cannot be represented are marked with a single "invalid" dialogue act, which is given
267
+
268
+ as input to the neural model. The model can choose to ignore the invalid dialogue act and attempt to predict the correct user state regardless.
269
+
270
+ # 6.3 Accuracy on the MultiWOZ 3.0 Test Set
271
+
272
+ Our first experiment evaluates how well our CSP model can understand the user utterances in the MultiWOZ 3.0 dataset on four metrics.
273
+
274
+ Exact match accuracy requires the predicted user state to identically match the annotation.
275
+
276
+ Slot accuracy requires the slots provided by the user in the predicted user state to match the annotation, ignoring comparison operators, requested slots, and the dialogue act.
277
+
278
+ Turn-by-turn accuracy assumes that the gold dialogue up to the current turn is available as input.
279
+
280
+ Dialogue accuracy requires predicting the correct state for all the previous and current turns of a given dialogue. This is a challenging but meaningful metric because in practice, once the model fails, the conversation diverges from the WOZ dialogue.
281
+
282
+ We train our CSP model on the combination of Synthesized and Paraphrased sets, fine-tune it on the Few-Shot training set, and fine-tune it again on the Self-Trained set. Our model achieves a $79.2\%$ turn-by-turn accuracy and $44.1\%$ dialogue accuracy in exact match (Table 2).
283
+
284
+ To understand the role of synthesized data, we removed all synthesized data, and train with only the manually annotated few-shot data. The synthesized data improves the turn-by-turn exact match accuracy by $5.5\%$ and the dialogue exact match accuracy by $8.4\%$ . This shows that the low-cost automatically generated training data is effective.
285
+
286
+ We performed an ablation study on the validation set to evaluate the components of our training strategy (Table 2). We first observe that the validation accuracy is higher than the test accuracy, because we used the validation set to refine our synthesis. Training with only synthesized data already delivers a respectable $61.8\%$ turn-by-turn accuracy; with the augmentation of auto-paraphrasing data, turn-by-turn accuracy improves $0.1\%$ , and dialogue accuracy improves $0.4\%$ .
287
+
288
+ The few-shot training alone delivers a high accuracy of $75.6\%$ . When the model trained on synthesized and paraphrased data is fine-tuned with few-shot data, the accuracy is $81.0\%$ , showing that these two approaches complement each other. Self-training further improves the turn-by-turn accuracy by $0.4\%$ , with $1\%$ better dialogue accuracy.
289
+
290
+ <table><tr><td rowspan="2" colspan="2">Training Strategy</td><td colspan="2">Turn-by-Turn</td><td colspan="2">Dialogue</td></tr><tr><td>EM</td><td>Slot</td><td>EM</td><td>Slot</td></tr><tr><td rowspan="2">Test</td><td>Full training</td><td>79.2%</td><td>87.5%</td><td>44.1%</td><td>61.0%</td></tr><tr><td>Few-shot only</td><td>73.7%</td><td>81.6%</td><td>35.7%</td><td>46.3%</td></tr><tr><td rowspan="5">Dev</td><td>Full training</td><td>81.4%</td><td>88.7%</td><td>51.9%</td><td>67.2%</td></tr><tr><td>- self-training</td><td>81.0%</td><td>88.0%</td><td>50.9%</td><td>65.3%</td></tr><tr><td>Synth. only</td><td>61.8%</td><td>73.1%</td><td>29.1%</td><td>38.0%</td></tr><tr><td>Synth. + para.</td><td>61.9%</td><td>73.3%</td><td>29.5%</td><td>37.4%</td></tr><tr><td>Few-shot only</td><td>75.6%</td><td>81.7%</td><td>41.8%</td><td>51.6%</td></tr></table>
291
+
292
+ Table 2: Turn-by-turn and dialogue accuracy, both exact match (EM) and slot, of the CSP model, on the MultiWOZ 3.0 test and validation sets.
293
+
294
+ <table><tr><td>Category</td><td>% Turns</td><td>Accuracy</td></tr><tr><td>Trained</td><td>15.5%</td><td>93.1%</td></tr><tr><td>In-simulation</td><td>69.7%</td><td>82.4%</td></tr><tr><td>Out-of-simulation</td><td>14.7%</td><td>62.7%</td></tr><tr><td>Unknown agent state</td><td>6.3%</td><td>66.0%</td></tr><tr><td>Domain switch</td><td>4.0%</td><td>84.0%</td></tr><tr><td>Eager action parameters</td><td>0.9%</td><td>73.3%</td></tr><tr><td>Multidomain</td><td>0.8%</td><td>16.7%</td></tr><tr><td>Abandon transaction</td><td>0.5%</td><td>25.0%</td></tr></table>
295
+
296
+ # 6.4 Generalization of the Dialogue Model
297
+
298
+ Our strategy is to handle the complexity of Wizard-of-Oz dialogues with a combination of simulated dialogues and few-shot training samples to teach generalization beyond simulated dialogues. We analyze the validation set to understand the difference between the simulated dialogues and the Wizard-of-Oz dialogues, and its effect on accuracy.
299
+
300
+ The results are shown in Table 3. The validation set is divided into:
301
+
302
+ 1. Trained: $15.5\%$ of the validation set turns share the same formal dialogue and user state with some sample in training (ignoring the slot values). Accuracy obtained: $93.1\%$ .
303
+ 2. In-simulation: $69.7\%$ of the validation set turns can be represented by the simulator: the formal context is contained in $\mathcal{D}_{\mathrm{Sim}}$ , and the user state can be generated by the simulator. Accuracy obtained: $82.4\%$ .
304
+ 3. Out-of-simulation: $14.7\%$ of the validation turns require the model to generalize beyond $\mathcal{D}_{\mathrm{Sim}}$ , either through few-shot or its own generalization capabilities. Accuracy obtained: $62.7\%$ .
305
+
306
+ Our synthesizer covers the Wizard-of-Oz
307
+
308
+ Table 3: Turn-by-turn exact match accuracy of validation set, categorized by whether each user utterance is synthesizable by our simulator. For the unsynthesizable category, we further divide in common classes of user behavior not captured by the simulator.
309
+
310
+ <table><tr><td>Model</td><td>Training Data</td><td>Accuracy</td></tr><tr><td>TRADE</td><td>MultiWOZ 2.1</td><td>37.3%</td></tr><tr><td>TRADE</td><td>0-shot 2.1</td><td>12.1%</td></tr><tr><td>SUMBT</td><td>MultiWOZ 2.1</td><td>39.3%</td></tr><tr><td>SUMBT</td><td>0-shot 2.1</td><td>18.3%</td></tr><tr><td>STAR</td><td>MultiWOZ 2.1</td><td>49.9%</td></tr><tr><td>CSP-NOAGENT</td><td>MultiWOZ 2.1</td><td>45.6%</td></tr><tr><td>CSP-NOAGENT</td><td>0-shot 2.1</td><td>13.3%</td></tr><tr><td colspan="2">CSP-NOAGENT + auto-parap.</td><td>12.2%</td></tr><tr><td>CSP</td><td>MultiWOZ 3.0</td><td>37.3%</td></tr><tr><td>CSP</td><td>Synthesized</td><td>23.6%</td></tr><tr><td>CSP</td><td>+ auto-parap.</td><td>25.2%</td></tr></table>
311
+
312
+ Table 4: Dialogue slot accuracy on the MultiWOZ 2.1 test set. CSP-NOAGENT has no formal agent state; it encodes the previous slots, and the current agent and user utterances. 0-shot 2.1 is the synthesized data by Campagna et al. (2020). CSP was trained on MultiWOZ 3.0 but tested on 2.1.
313
+
314
+ conversations well. Even though our simulator and agent are built using a state machine with only 54 user transitions and 24 agent transitions, $85.2\%$ of the validation set is in-simulation.
315
+
316
+ Research that trains and validates on simulated data is missing a non-trivial population of Wizard-of-Oz dialogues. We found that $14.7\%$ of the validation turns are representable in ThingTalk but are out-of-simulation.
317
+
318
+ Our training strategy generalizes beyond the simulated dialogues. For the out-of-simulation turns, our model achieves an accuracy of $62.7\%$ . The model can generalize well on validation turns where the agent state is unseen in training, achieving $66\%$ accuracy. This result speaks to the strength of using a formal representation of the agent, which avoids interpreting untrained agent utterances.
319
+
320
+ The model also reacts well to strong signals in the user utterance. The model achieves $84.0\%$ accuracy when the user switches domains unexpectedly, and $73.3\%$ accuracy when the user starts issuing slots for the action before completing the query.
321
+
322
+ Finally, when the user issues a command over two domains at once, the model achieves $16.7\%$ accuracy. When the user abandons a booking transaction mid-way, the model achieves $25\%$ accuracy. These kinds of out-of-simulation states are also rare in the few shot training set. The model can generalize, but is biased towards the common cases seen in the training data.
323
+
324
+ # 6.5 Dialogue History vs. Formal Context
325
+
326
+ We wish to evaluate the difference between using dialogue history, as in DST models, and using a formal context. We do so by measuring the dialogue
327
+
328
+ accuracy, which has the same definition for DST and CSP.
329
+
330
+ Because we do not have the resources to reannotate the training data with ThingTalk, we will use the MultiWOZ 2.1 training set for this experiment. For a DST parser, we use TRADE (Wu et al., 2019), SUMBT (Lee et al., 2019), and STAR (Ye et al., 2021b), three high-performing models for MultiWOZ 2.1. For CSP, we train a model we call CSP-NOAGENT, which uses the same neural architecture as our CSP. Because MultiWOZ 2.1 has no formal agent state annotations, CSP-NOAGENT uses the original slot-value annotation from the immediately preceding turn as the formal input context. This context, the current agent utterance, and the current user utterance are used to predict all the slots from the dialogue. This is the best approximation to ThingTalk possible given the available data; the results provide a lower bound on CSP with fully annotated training data.
331
+
332
+ The results are shown in Table 4. We see that CSP-NOAGENT outperforms TRADE by $8.3\%$ and SUMBT by $6.3\%$ in dialogue accuracy, and is within $4\%$ of STAR, a highly optimized model. Note that CSP-NOAGENT needs no new annotations, and the slot representation captures only a small subset of the information in the utterances. This shows the advantage of replacing the dialogue history with a formal context. It also shows that the use of formal contexts can be applied in other representations.
333
+
334
+ For comparison, we also test our CSP on MultiWOZ 2.1, using self-predicted formal agent states. Our model, trained on MultiWOZ 3.0, reaches $37.3\%$ dialogue accuracy in the MultiWOZ 2.1 test set. This is due to the reannotation of MultiWOZ 3.0, and because the model is trained and tested on data with different annotation conventions. Compared to the dialogue slot accuracy on MultiWOZ 3.0, we observe a gap of about $11\%$ , which serves as a lower bound on the benefit of having experts annotate the test data. Note that our approach does not require manual annotation of a large training set, and therefore expert annotation of test data was feasible.
335
+
336
+ # 6.6 Comparison with Previous 0-Shot Model
337
+
338
+ Our last experiment compares our work with the zero-shot model proposed by Campagna et al. (2020). Their paper only included results with transfer learning on new domains. Here, we
339
+
340
+ evaluate TRADE, SUMBT, and CSP-NOAGENT trained with their synthesized data in a zero-shot fashion. The results shown in Table 4 indicate that the previous approach is inadequate, achieving only $12.1\%$ dialogue accuracy with TRADE and $18.3\%$ with SUMBT. CSP-NOAGENT achieves $13.3\%$ dialogue accuracy. Our approach, instead, achieves $23.6\%$ dialogue accuracy. Adding automatic paraphrasing increases the turn-by-turn accuracy by about $3\%$ for both models.
341
+
342
+ This result shows that our approach is much more effective in synthesizing data. In particular, it is important to represent the agent state formally when training with synthesized data, as it eliminates the need to synthesize and parse agent utterances.
343
+
344
+ # 7 Conclusion
345
+
346
+ This paper presents a sample-efficient methodology, based on the extended ThingTalk representation, to predict precise dialogue states in Wizard-of-Oz conversations. We achieve a turn-by-turn exact-match accuracy of $79.2\%$ on the MultiWOZ 3.0 dataset, while using 50x less manually annotated training data than the original MultiWOZ dataset.
347
+
348
+ The proposed ThingTalk dialogue representation is precise, complete, and executable. It is precise enough to cover $98\%$ of the dialogue turns in MultiWOZ. The precision enables automatic synthesis of dialogues covering $85\%$ of the MultiWOZ data set. ThingTalk is complete and executable, as evidenced by a fully working agent that can simply execute ThingTalk queries without referring to the user input. Furthermore, the agent can handle dialogue flows beyond those that can be simulated.
349
+
350
+ The accuracy is achieved with a contextual semantic parser (CSP) where the dialogue context is represented in ThingTalk rather than the natural language dialogue history. It is trained first with auto-paraphrased synthetic data, fine-tuned with the few-shot annotated data, then self-trained.
351
+
352
+ In summary, this paper shows that with ThingTalk, we can predict WOZ dialogues accurately with training data mostly generated from a state machine. Our methodology thus combines the best of the WOZ and M2M approaches, as it can handle the more realistic WOZ dialogues, while having a low data acquisition cost like M2M.
353
+
354
+ # 8 Ethical Considerations
355
+
356
+ We envision that our training strategy will broaden the availability of task-oriented agents for tasks and populations not currently covered by existing large-scale datasets, due to its low annotation requirement. We have open-sourced a tool set designed around our representation for bootstrapping affordable contextual semantic parsers for new domains.
357
+
358
+ Our agent was tuned and evaluated on the MultiWOZ benchmark. MultiWOZ is a crowdsourced Wizard-of-Oz dataset; WOZ datasets are known not to fully represent real-world conversations (Ganhotra et al., 2020). Further research is needed before a dialogue agent based on our methodology can be deployed in the real world. Additionally, the current version of the agent was tuned for English; future work should investigate techniques to automatically localize a contextual semantic parser, analogous to prior research done for single-turn semantic parsers (Moradshahi et al., 2020).
359
+
360
+ Our training strategy replaces manual annotation of data with automatically obtained data, which requires some additional amount of computation time. In practice, such additional compute is small: data synthesis runs in 5 hours on a single machine with no GPUs; the paraphrase dataset can be obtained in about 5 hours on a machine with 4 Nvidia T4 GPUs; training completes within 8 hours on a machine with one Nvidia V100; self-training requires 2 hours on a single Nvidia T4 GPU, and fine-tuning is another 1.5 hours on one Nvidia V100. Overall, the whole process is done with about 22 hours of compute time, well below the cost of human annotation of equivalent amounts of data. We note that the large amount of synthetic data poses no challenge to convergence in practice, so training models with a large amount of synthesized data has little effect on the compute cost.
361
+
362
+ The manually annotated portion of our dataset was obtained from the previously released MultiWOZ 2.1 dataset, a crowdsourced dataset. No crowdsourcing was employed in this paper; the data was annotated by the authors.
363
+
364
+ # Acknowledgments
365
+
366
+ This work is supported by the National Science Foundation under Grant No. 1900638, and the Alfred P. Sloan Foundation under Grant No. G-2020-13938.
367
+
368
+ # References
369
+
370
+ Jacob Andreas, John Bufe, David Burkett, Charles Chen, Josh Clausman, Jean Crawford, Kate Crim, Jordan DeLoach, Leah Dorner, Jason Eisner, Hao Fang, Alan Guo, David Hall, Kristin Hayes, Kellie Hill, Diana Ho, Wendy Iwaszuk, Smriti Jha, Dan Klein, Jayant Krishnamurthy, Theo Lanman, Percy Liang, Christopher H. Lin, Ilya Lintsbakh, Andy McGovern, Aleksandr Nisnevich, Adam Pauls, Dmitrij Petters, Brent Read, Dan Roth, Subhro Roy, Jesse Rusak, Beth Short, Div Slomin, Ben Snyder, Stephon Striplin, Yu Su, Zachary Tellman, Sam Thomson, Andrei Vorobev, Izabela Witoszko, Jason Wolfe, Abby Wray, Yuchen Zhang, and Alexander Zotov. 2020. Task-oriented dialogue as dataflow synthesis. Transactions of the Association for Computational Linguistics, 8:556-571.
371
+
372
+ Paweł Budzianowski, Tsung-Hsien Wen, Bo-Hsiang Tseng, Inigo Casanueva, Ultes Stefan, Ramadan Osman, and Milica Gašić. 2018. MultiWOZ - a large-scale multi-domain wizard-of-oz dataset for task-oriented dialogue modelling. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing (EMNLP).
373
+
374
+ Bill Byrne, Karthik Krishnamoorthi, Chinnadhurai Sankar, Arvind Neelakantan, Ben Goodrich, Daniel Duckworth, Semih Yavuz, Amit Dubey, Kyu-Young Kim, and Andy Cedilnik. 2019. Taskmaster-1: Toward a realistic and diverse dialog dataset. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 4506-4517.
375
+
376
+ Giovanni Campagna, Agata Foryciarz, Mehrad Moradshahi, and Monica Lam. 2020. Zero-shot transfer learning with synthesized data for multi-domain dialogue state tracking. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 122-132, Online. Association for Computational Linguistics.
377
+
378
+ Giovanni Campagna, Silei Xu, Mehrad Moradshahi, Richard Socher, and Monica S. Lam. 2019. Genie: A generator of natural language semantic parsers for virtual assistant commands. In Proceedings of the 40th ACM SIGPLAN Conference on Programming Language Design and Implementation, PLDI 2019, pages 394-410, New York, NY, USA. ACM.
379
+
380
+ Lu Chen, Boer Lv, Chi Wang, Su Zhu, Bowen Tan, and Kai Yu. 2020. Schema-guided multi-domain dialogue state tracking with graph attention neural networks. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 7521-7528.
381
+
382
+ Jianpeng Cheng, Devang Agrawal, Héctor Martínez Alonso, Shruti Bhargava, Joris Driesen, Federico Flego, Dain Kaplan, Dimitri Kartsaklis, Lin Li, Dhivya Piraviperumal, Jason D. Williams, Hong Yu, Diarmuid Ó Séaghdha, and Anders Johannsen. 2020. Conversational semantic parsing
383
+
384
+ for dialog state tracking. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 8107-8117, Online. Association for Computational Linguistics.
385
+ Arash Einolghozati, Sonal Gupta, Mrinal Mohit, and Rushin Shah. 2019. Improving robustness of task oriented dialog systems. arXiv preprint arXiv:1911.05153.
386
+ Layla El Asri, Hannes Schulz, Shikhar Sharma, Jeremie Zumer, Justin Harris, Emery Fine, Rahul Mehrotra, and Kaheer Suleman. 2017. Frames: a corpus for adding memory to goal-oriented dialogue systems. In Proceedings of the 18th Annual SIG-dial Meeting on Discourse and Dialogue, pages 207-219, Saarbrücken, Germany. Association for Computational Linguistics.
387
+ Mihail Eric, Rahul Goel, Shachi Paul, Abhishek Sethi, Sanchit Agarwal, Shuyang Gao, and Dilek Hakkani-Tur. 2019. MultiWOZ 2.1: Multi-domain dialogue state corrections and state tracking baselines. arXiv preprint arXiv:1907.01669.
388
+ Mihail Eric, Lakshmi Krishnan, Francois Charette, and Christopher D. Manning. 2017. Key-value retrieval networks for task-oriented dialogue. In Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue, pages 37-49, Saarbrücken, Germany. Association for Computational Linguistics.
389
+ Jatin Ganhotra, Robert Moore, Sachindra Joshi, and Kahini Wadhawan. 2020. Effects of naturalistic variation in goal-oriented dialog. In *Findings of the Association for Computational Linguistics: EMNLP* 2020, pages 4013-4020, Online. Association for Computational Linguistics.
390
+ Ting Han, Ximing Liu, Ryuichi Takanobu, Yixin Lian, Chongxuan Huang, Wei Peng, and Minlie Huang. 2020. Multiwoz 2.3: A multi-domain task-oriented dataset enhanced with annotation corrections and co-reference annotation. arXiv preprint arXiv:2010.05594.
391
+ Michael Heck, Carel van Niekerk, Nurul Lubis, Christian Geishauser, Hsien-Chin Lin, Marco Moresi, and Milica Gasic. 2020. TripPy: A triple copy strategy for value independent neural dialog state tracking. In Proceedings of the 21th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pages 35-44, 1st virtual meeting. Association for Computational Linguistics.
392
+ Ehsan Hosseini-Asl, Bryan McCann, Chien-Sheng Wu, Semih Yavuz, and Richard Socher. 2020. A simple language model for task-oriented dialogue. In Advances in Neural Information Processing Systems, volume 33, pages 20179–20191. Curran Associates, Inc.
393
+ J. Edward Hu, Abhinav Singh, Nils Holzenberger, Matt Post, and Benjamin Van Durme. 2019. Large-scale, diverse, paraphrastic bitexts via sampling and
394
+
395
+ clustering. In Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL), pages 44-54, Hong Kong, China. Association for Computational Linguistics.
396
+ John F Kelley. 1984. An iterative design methodology for user-friendly natural language office information applications. ACM Transactions on Information Systems (TOIS), 2(1):26-41.
397
+ Hwaran Lee, Jinsik Lee, and Tae-Yoon Kim. 2019. Sumbt: Slot-utterance matching for universal and scalable belief tracking. Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics.
398
+ Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. BART: Denoising sequence-to-sequence pretraining for natural language generation, translation, and comprehension. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7871-7880, Online. Association for Computational Linguistics.
399
+ David McClosky, Eugene Charniak, and Mark Johnson. 2006. Effective self-training for parsing. In Proceedings of the Human Language Technology Conference of the NAACL, Main Conference, pages 152-159, New York City, USA. Association for Computational Linguistics.
400
+ Mehrad Moradshahi, Giovanni Campagna, Sina Semnani, Silei Xu, and Monica Lam. 2020. Localizing open-ontology QA semantic parsers in a day using machine translation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 5970-5983, Online. Association for Computational Linguistics.
401
+ Baolin Peng, Chunyuan Li, Jinchao Li, Shahin Shayan- deh, Lars Liden, and Jianfeng Gao. 2020. Soloist: Few-shot task-oriented dialog with a single pretrained auto-regressive model. arXiv preprint arXiv:2005.05298.
402
+ Abhinav Rastogi, Xiaoxue Zang, Srinivas Sunkara, Raghav Gupta, and Pranav Khaitan. 2020. Towards scalable multi-domain conversational agents: The schema-guided dialogue dataset. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 8689-8696.
403
+ Liliang Ren, Jianmo Ni, and Julian McAuley. 2019. Scalable and accurate dialogue state tracking via hierarchical sequence generation. Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP).
404
+ Pararth Shah, Dilek Hakkani-Tür, Gokhan Tur, Abhinav Rastogi, Ankur Bapna, Neha Nayak, and Larry Heck. 2018. Building a conversational agent
405
+
406
+ overnight with dialogue self-play. arXiv preprint arXiv:1801.04871.
407
+ Zachary Tellman. 2021. Designing a framework for conversational interfaces. https://www.microsoft.com/en-us/research/group/msai/articles/designing-a-framework-for-conversational-interfaces/.
408
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Processing Systems, pages 5998-6008.
409
+ Yushi Wang, Jonathan Berant, and Percy Liang. 2015. Building a semantic parser overnight. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 1332-1342. Association for Computational Linguistics.
410
+ Jason D Williams and Geoffrey Zweig. 2016. End-to-end LSTM-based dialog control optimized with supervised and reinforcement learning. arXiv preprint arXiv:1606.01269.
411
+ Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumont, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Remi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander Rush. 2020. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Association for Computational Linguistics.
412
+ Chien-Sheng Wu, Andrea Madotto, Ehsan Hosseini-Asl, Caiming Xiong, Richard Socher, and Pascale Fung. 2019. Transferable multi-domain state generator for task-oriented dialogue systems. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 808-819.
413
+ Silei Xu, Sina Semnani, Giovanni Campagna, and Monica Lam. 2020. AutoQA: From databases to Q&A semantic parsers with only synthetic training data. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 422-434, Online. Association for Computational Linguistics.
414
+ Fanghua Ye, Jarana Manotumruksa, and Emine Yilmaz. 2021a. Multiwoz 2.4: A multi-domain task-oriented dialogue dataset with essential annotation corrections to improve state tracking evaluation. arXiv preprint arXiv:2104.00773.
415
+ Fanghua Ye, Jarana Manotumruksa, Qiang Zhang, Shenghui Li, and Emine Yilmaz. 2021b. Slot self-attentive dialogue state tracking. In Proceedings
416
+
417
+ of the Web Conference 2021, WWW '21, page 1598-1608, New York, NY, USA. Association for Computing Machinery.
418
+ Tao Yu, Rui Zhang, Oleksandr Polozov, Christopher Meek, and Ahmed Hassan Awadallah. 2021. SCoRE: Pre-training for context representation in conversational semantic parsing. In International Conference on Learning Representations.
419
+ Xiaoxue Zang, Abhinav Rastogi, Srinivas Sunkara, Raghav Gupta, Jianguo Zhang, and Jindong Chen. 2020. MultiWOZ 2.2: A dialogue dataset with additional annotation corrections and state tracking baselines. In Proceedings of the 2nd Workshop on Natural Language Processing for Conversational AI, pages 109-117, Online. Association for Computational Linguistics.
420
+ Yichi Zhang, Zhijian Ou, and Zhou Yu. 2020. Task-oriented dialog systems that consider multiple appropriate responses under the same context. Proceedings of the AAAI Conference on Artificial Intelligence, 34(05):9604-9611.
421
+ Li Zhou and Kevin Small. 2019. Multi-domain dialogue state tracking as dynamic knowledge graph enhanced question answering. arXiv preprint arXiv:1911.06192.
422
+ Barret Zoph, Golnaz Ghiasi, Tsung-Yi Lin, Yin Cui, Hanxiao Liu, Ekin Dogus Cubuk, and Quoc Le. 2020. Rethinking pre-training and self-training. Advances in neural information processing systems, 33:3833-3845.
423
+
424
+ # A ThingTalk Definition
425
+
426
+ # A.1 Syntax
427
+
428
+ Formal Dialogue $d$ : a $r^{*} s^{*}$
429
+ User State $u$ : $ua s^{*}$
430
+ Agent State $a$ : $aa as?$
431
+ User Act $ua$ : Greet | Exec | Cancel | Insist | AskRecommend | LearnMore | ActionQuestion | End | Invalid
432
+ Agent Act $aa$ : Init | Greet | RecommendOne | RecommendMany | Propose | SearchQuestion | SlotFill | LearnMoreWhat | EmptySearch | Confirm | ActionSuccess | ActionError | AnythingElse | Invalid
433
+ User Statement $s$ : $q \mid ac$
434
+ Result $r$ : $s \left[ \{ [sn = v]^{+} \} \right] *$
435
+ Agent Statement $as$ : Request $sn^{+}$ | [Propose | Confirm] $[q \mid ac]$
436
+ Query $q$ : <ThingTalk query>
437
+ Action $ac$ : $dn ([sn = v]^{*})$
438
+ Domain Name $dn$ : <identifier>
439
+ Slot Name $sn$ : <identifier>
440
+ Value $v$ : <constant>
441
+
442
+ # A.2 Agent Definition
443
+
444
+ The agent is a function $\operatorname{Agent}(d, u) = d'$ that computes the new formal representation of the entire dialogue. The representation is constructed incrementally, starting from the initial dialogue $d_0$ which is empty.
445
+
446
+ Let $d = (a, r, s) \in \mathcal{D}$ and $u = (ua, s_{\mathrm{u}}) \in U$ be the two inputs to the agent. The agent computes the new agent state $a'$ and the new dialogue $d'$ as follows:
447
+
448
+ $$
449
+ \begin{array}{l} (r_{\mathrm{u}}, is_{\mathrm{u}}) = \operatorname{Execute}(s_{\mathrm{u}}) \\ a' = \operatorname{Policy}(ua,\ r \,\|\, r_{\mathrm{u}},\ is_{\mathrm{u}}) \\ d' = (a',\ r \,\|\, r_{\mathrm{u}},\ is_{\mathrm{u}}) \end{array}
450
+ $$
451
+
452
+ where $\|$ denotes concatenation. The Execute function calls the ThingTalk runtime to execute the statements in the user state, $s_{\mathrm{u}}$ . It returns (1) the results $r_{\mathrm{u}}$ obtained by executing all statements in $s_{\mathrm{u}}$ whose required parameters are available, and (2) the rest of the (incomplete) statements, $is_{\mathrm{u}}$ . The Policy function determines the agent state $a'$ from the user act $ua$ , all the results $r_{\mathrm{u}}$ appended to the previous results $r$ , and $is_{\mathrm{u}}$ . The agent returns the new dialogue $d'$ with the new agent state, all the results, and the new incomplete statements. The incomplete statements $s$ in $d$ are discarded. If the user has not changed topics, information in $s$ is incorporated in $s_{\mathrm{u}}$ .
453
+
454
+ # B Training
455
+
456
+ # B.1 Preprocessing
457
+
458
+ We apply the same preprocessing used by TRADE (Wu et al., 2019) to the input utterances.
459
+
460
+ We also use a rule-based preprocessor to identify time expressions, and replace them with placeholder tokens. All slot values in the result and agent states that have string or time type are replaced with a placeholder when input to the model.
461
+
462
+ We normalize all slot values in the user state to match the utterance, regardless of typos. When comparing the slot values for equality, we normalize entity names via a database lookup.
463
+
464
+ # B.2 Hyperparameters
465
+
466
+ Our model uses a BART large model which has 400 million trainable parameters. We use the Adam optimizer, with the Transformer learning rate schedule (800 iterations of warm-up, 0.04 multiplier).
467
+
468
+ We train our model for 50,000 gradient updates on the synthesized data and choose the model with the highest validation exact-match accuracy. We then fine-tune that model on the few-shot training set for 15,000 gradient updates, and again choose the model with the highest validation accuracy. We repeat this process for another 15,000 updates on the self-train set. Training is done on a single GPU with 16 GB memory and batch size is chosen based on the length of the examples in each batch: we choose as many examples as we can fit in the GPU memory. Gradient accumulation is used to increase the effective batch size by a factor of 20.
469
+
470
+ # C Dataset Statistics
471
+
472
+ <table><tr><td></td><td># dlgs</td><td># turns</td><td># words</td></tr><tr><td>Synthesized</td><td>968,007</td><td>830,792</td><td>11,390,957</td></tr><tr><td>Paraphrased</td><td>592,970</td><td>945,946</td><td>12,845,548</td></tr><tr><td>Few-Shot</td><td>168</td><td>1,061</td><td>14,669</td></tr><tr><td>Self-Training</td><td>8,420</td><td>56,546</td><td>760,927</td></tr><tr><td>Validation</td><td>265</td><td>1,582</td><td>21,256</td></tr><tr><td>Test</td><td>995</td><td>7,271</td><td>100,814</td></tr></table>
473
+
474
+ Table 5: Statistics of our training and evaluation sets: number of dialogues, of turns, and of words. For the synthesized dataset, we do not count turns that appear identically in multiple dialogues.
475
+
476
+ # D Annotated Example Dialogue
477
+
478
+ Here is an example of a dialogue generated by the simulator, between the user U and the agent A. US denotes the user state, D denotes the formal dialogue.
479
+
480
+ U: List good restaurants which offer Indian.
481
+
482
+ US: Exec: Restaurant, food = "Indian";
483
+
484
+ A: Do you have a specific part of town in mind?
485
+
486
+ D: SearchQuestion: area;
487
+
488
+ ```txt
489
+ Restaurant, food = "Indian"
490
+ [{name = "Cocum", area = west, ...}, {name = "Rajmahal", area = east, ...}, ...];
491
+ ```
492
+
493
+ U: Yes I'd like near the east.
494
+
495
+ US: Exec: Restaurant, food = "Indian" && area = east;
496
+
497
+ A: I have Rajmahal and Curry Prince.
498
+
499
+ ```matlab
500
+ D: RecommendTwo; Restaurant, food = "Indian" [...]; Restaurant, food = "Indian" && area = east [{name = "Rajmahal", area = east, ...}, {name = "Curry Prince", area = east, ...}, ...];
501
+ ```
502
+
503
+ U: Is Curry Prince a restaurant serving Indian?
504
+
505
+ US: Exec: [food] of Restaurant, food = "Indian" && area = east && name = "Curry Prince";
506
+
507
+ A: Curry Prince serves Indian. Would you like me to book it?
508
+
509
+ ```txt
510
+ D: RecommendOne; Propose: Restaurant.MakeReservation(name = "Curry Prince", book_people = 6);
511
+ Restaurant, food = "Indian" [...]; Restaurant, food = "Indian" && area = east [...]; [food] of Restaurant, food = "Indian" && area = east && name = "Curry Prince" [{name = "Curry Prince", food = "Indian"}];
512
+ ```
513
+
514
+ U: I will take it. Please make a reservation at the restaurant on Monday at 13:00.
515
+
516
+ US: Exec: Restaurant.MakeReservation(name="Curry Prince", book_people=6, book_day=Sunday, book_time=13:00)
517
+
518
+ A: Alright, Consider your request done! Its confirmation number is ay05tzjq. Anything else for you today?
519
+
520
+ ```matlab
521
+ D: ActionSuccess; Restaurant, food = "Indian" [...]; Restaurant, food = "Indian" && area = east [...]; [food] of Restaurant, food = "Indian" && area = east && name = "Curry Prince" [...]; Restaurant.MakeReservation(name="Curry Prince", book_people=6, book_day=Sunday, book_time=13:00) [[reference_number = "ay05tzjq"]];
522
+ ```
523
+
524
+ U: Thanks, this is good for now. Goodbye!
525
+
526
+ US: End
afewshotsemanticparserforwizardofozdialogueswiththeprecisethingtalkrepresentation/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ce314994bf23e48719d400ef03eb35a172f9e02e41b2da14cb0d9cf3162740e
3
+ size 240082
afewshotsemanticparserforwizardofozdialogueswiththeprecisethingtalkrepresentation/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e7555dd189a8148a8dee3631a557ccc7c8d1e7453e1d37f3d6f72970efa80231
3
+ size 567058
agraphenhancedbertmodelforeventprediction/a3318d62-e30c-46fd-9e73-e5438c9559e1_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5e3c9f64e1e2e29bccdac6ba9724f98064b79bcef1fb014e037fc2c8cc7adfba
3
+ size 82708
agraphenhancedbertmodelforeventprediction/a3318d62-e30c-46fd-9e73-e5438c9559e1_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:17d285a9c8b365d517179cccac461602da741c2140cbf19183f43c9dfb75abf7
3
+ size 94969
agraphenhancedbertmodelforeventprediction/a3318d62-e30c-46fd-9e73-e5438c9559e1_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b8cdb0b53be48892953fa3f9f395cce60d481fd96b10279a8131c4b8c09367b6
3
+ size 784887
agraphenhancedbertmodelforeventprediction/full.md ADDED
@@ -0,0 +1,388 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # A Graph Enhanced BERT Model for Event Prediction
2
+
3
+ Li Du†, Xiao Ding†*, Yue Zhang‡, Kai Xiong†, Ting Liu†, and Bing Qin†
4
+
5
+ $^{\dagger}$ Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, China
6
+
7
+ {ldu,xding,kxiong,tliu,qinb} $@$ ir.hit.edu.cn
8
+
9
+ ‡ School of Engineering, Westlake University zhangyue@westlake.edu.cn
10
+
11
+ # Abstract
12
+
13
+ Predicting the subsequent event for an existing event context is an important but challenging task, as it requires understanding the underlying relationship between events. Previous methods propose to retrieve relational features from event graph to enhance the modeling of event correlation. However, the sparsity of event graph may restrict the acquisition of relevant graph information, and hence influence the model performance. To address this issue, we consider automatically building of event graph using a BERT model. To this end, we incorporate an additional structured variable into BERT to learn to predict the event connections in the training process. Hence, in the test process, the connection relationship for unseen events can be predicted by the structured variable. Results on two event prediction tasks: script event prediction and story ending prediction, show that our approach can outperform state-of-the-art baseline methods.
14
+
15
+ # 1 Introduction
16
+
17
+ Understanding the semantics of events and their underlying connections is a long-standing task in natural language processing (Minsky, 1974; Schank, 1975). Much research has been done on extracting script knowledge from narrative texts, and making use of such knowledge for predicting a likely subsequent event given a set of context events.
18
+
19
+ A key issue to fulfilling such tasks is the modeling of event relation information. To this end, early work exploited event pair relations (Chambers, 2008; Jans et al., 2012; Granroth and Clark, 2016) and temporal information (Pichotta, 2016; Pichotta and Mooney, 2016). The former has been used for event prediction by using embedding methods, where the similarity between subsequent events and context events are measured and used for candidate ranking. The latter has been used
20
+
21
+ ![](images/2cebd38bdee6c39af925f63ee90a6f5367c10524d3b560f40e3e9e05c46e59b6.jpg)
22
+ Figure 1: (a) An example for event prediction. (b) Given an event sequence, retrieval-based methods lookup structural information of events from event graph. However, in the test process, part of events may be not covered by the event graph, hence their connection information is unavailable. Different from retrieval-based methods, GraphBERT is able to predict the connection strength between events.
23
+
24
+ for neural network methods, where models such as LSTMs have been used to model a chain of context events. There has also been work integrating the two methods (Wang et al., 2017).
25
+
26
+ Despite achieving certain effectiveness, the above methods do not fully model the underlying connection between context events. As shown in Figure 1 (a), given the facts that Jason had been overstretched at work, He decided to change job and Jason finds a new job, the subsequent event Jason is satisfied with his new job is more likely than Jason feels much stressed at his new job, which can be inferred by understanding the fact that the reason for his new job search is stress in his job. Li et al. (2018b) and Koncel et al. (2019) consider such context structure by building event evolutionary graphs, and using network embedding models to extract relational features. For these methods, event graphs serve as a source of external structured knowledge, which are extracted from narrative texts and provide prior features for event correlation.
27
+
28
+ One limitation of their methods is that the effectiveness of their methods heavily relies on the
29
+
30
+ coverage of the event graph. As shown in Figure 1 (b), Li et al. (2018b) and Koncel et al. (2019)'s methods work by looking up the event tuples in the event graph to retrieve the connection information between events for predicting the output. This is done by the standard knowledge graph lookup operation. However, if the context events are not in the event graph, the method cannot find relevant information. Figure 1 (b) shows an extreme case. In event sequence $\beta$ , although the context events be starving and go for a meal are highly similar to the event graph content feel hungry and go for lunch, the retrieval-based methods can fail to match context events in the event graph and utilize the event graph knowledge. However, in practice, it is infeasible to construct an event graph that covers most of the possible events. As an event is the composition of multiple arguments, so the same event can correspond to various semantically equivalent expressions, such as "feel hungry" vs "be starving", or "hunger", etc. This would limit the performance of the retrieval-based systems.
31
+
32
+ To address this issue, we consider automatically predicting the event links using a graph-enhanced BERT model (GraphBERT). As shown in Figure 1 (b), we collect event structure information into a BERT model with graph structure extension. Given a set of event contexts, we use the GraphBERT model to construct an event graph structure by predicting connection strengths between context events, instead of retrieving them from a prebuilt event graph. Specifically, we extend the BERT model by introducing a structured variable, which captures the connection strengths between events. As shown in Figure 2, during training, both context events and external event graph information are used to train the structured variable. During testing, the structured variable which describes connection strengths between events is obtained using the context event only, which is used for finding the next event. Subsequently, we encode the predicted link strength for making a prediction.
33
+
34
+ Experimental results on standard datasets show that our model outperforms baseline methods. Further analysis demonstrates that GraphBERT can predict the connection strengths for unseen events and improve the prediction accuracy. The codes are publicly available at https://github.com/sjcfr.
35
+
36
+ # 2 Background
37
+
38
+ As shown in Figure 1 (a), the task of event prediction (Mostafazadeh et al., 2016; Li et al., 2018b) can be defined as choosing the most reasonable subsequent event for an existing event context. Formally, given a candidate event sequence $X = \{X_{e_1},\ldots ,X_{e_t},X_{e_{c_j}}\}$ , where $\{X_{e_1},\dots,X_{e_t}\}$ are $t$ context events and $X_{e_{c_j}}$ is the $c_j$ th candidate subsequent event, the prediction model is required to predict a relatedness score $Y\in [0,1]$ for the candidate subsequent event given the event context.
39
+
40
+ Event graphs (Li et al., 2018b) have been used to represent relationships between multiple events. Formally, an event graph could be denoted as $G = \{V, R\}$ , where $V$ is the node set, $R$ is the edge set. Each node $V_i \in V$ corresponds to an event $X_i$ , while each edge $R_{ij} \in R$ denotes a directed edge $V_i \to V_j$ along with a weight $W_{ij}$ , which is calculated by:
41
+
42
+ $$
43
+ W_{ij} = \frac{\operatorname{count}(V_i, V_j)}{\sum_{k} \operatorname{count}(V_i, V_k)} \tag{1}
44
+ $$
45
+
46
+ where $\mathrm{count}(V_i, V_j)$ denotes the frequency of a bigram $(V_i, V_j)$ . Hence, the weight $W_{ij}$ is the probability that $X_j$ is the subsequent event of $X_i$ .
47
+
48
+ # 3 Baseline System
49
+
50
+ Before formally introducing the GraphBERT framework, we first introduce a retrieval-based baseline system. As Figure 2 (a) shows, given an event sequence $X = \{X_{e_1},\dots ,X_{e_t},X_{e_{c_j}}\}$ , the baseline system retrieves the corresponding structural information for each event within $X$ from a prebuilt event graph $G$ , and then integrates the retrieved structural information into the BERT frame for predicting the relatedness score $Y$ .
51
+
52
+ For an arbitrary event tuple $(X_{e_i}, X_{e_j})$ , if it is covered by the event graph $G$ (i.e., both $X_{e_i}$ and $X_{e_j}$ are nodes of $G$ ), then we can retrieve the corresponding node embeddings $e_i$ and $e_j$ , together with the edge weight $A_{ij}$ by matching the event tuple in the event graph. The representation vector of the events within $X$ further form into an embedding matrix $E$ , and the edge weights form into an adjacency matrix $A$ . To make use of the retrieved structural information for enhancing the prediction process, we first employ a graph neural network to combine the event representation matrix and the adjacency matrix:
53
+
54
+ $$
55
+ E ^ {(U)} = \sigma (A E W _ {U}) \tag {2}
56
+ $$
57
+
58
+ ![](images/a91eaff0d2f51c043e106a22cabb756e4c7899fc5e5f6c6f540b3aa4bce2eed0.jpg)
59
+ Figure 2: Model Structure. (a) Architecture of the baseline system. Given an event sequence, the baseline system retrieves event node features and connection strength from a prebuilt event graph. (b) In addition to the baseline system, GraphBERT introduces an additional aggregator to obtain event representation from the hidden states of BERT, and learns to predict the connection strength between events in the training process using the inferer, so that in the test process the connection information can be predicted for arbitrary events.
60
+
61
+ ![](images/89f55363933df79958d4b24a64c145ff5a901fda67e030c4098c73581f44f2bc.jpg)
62
+
63
+ where $W_{U}\in \mathbb{R}^{d\times d}$ is a weight matrix; $\sigma$ is a sigmoid function; $E^{(U)}$ is the event representation matrix updated by $A$ .
64
+
65
+ Then the combined event graph knowledge can be merged into the frame of BERT for enhancing the prediction process. To this end, we employ an attention operation to softly select relevant information from the updated event representations $E^{(U)}$ , and then update the hidden states of BERT. Specifically, we take the hidden states of the $s_1$ th Transformer layer of BERT (denoted as $H^{s_1}$ ) as the query, and take the updated event representation $E^{(U)}$ as the key:
66
+
67
+ $$
68
+ E^{(U)^{*}} = \operatorname{MultiAttn}\left(H^{s_1}, E^{(U)}\right) \tag{3}
69
+ $$
70
+
71
+ where $E^{(U)^*}$ carries information selected from $E^{(U)}$ and relevant to $H^{s_1}$ .
72
+
73
+ Then we merge $E^{(U)^*}$ with $H^{s_1}$ through an addition operation, and employ layer normalization to keep gradient stability:
74
+
75
+ $$
76
+ H^{s_1 *} = \operatorname{LayerNorm}\left(E^{(U)*} + H^{s_1}\right) \tag{4}
77
+ $$
78
+
79
+ $H^{s_1*}$ contains both the node feature information and the connection information between events. By taking $H^{s_1*}$ as the input of the subsequent $(s_1 + 1)$ th Transformer layers of BERT, the event prediction process is enhanced with the predicted event graph knowledge.
80
+
81
+ This retrieval-based baseline system can be regarded as the adaption of Li et al. (2018b) and Koncel et al. (2019)'s retrieval-based methods on a pretrained model BERT.
82
+
83
+ # 4 GraphBERT
84
+
85
+ A critical weakness of the retrieval-based baseline system is that it heavily relies on the coverage of
86
+
87
+ the event graph. In other words, if an event is not covered by the event graph, then the structural information (i.e., node features and the adjacency matrix) would be absent from the constructed event graph, which further limits the model performance.
88
+
89
+ In this paper, we propose a predictive-based framework GraphBERT. GraphBERT uses the transformer layers of BERT as an encoder to obtain the representation for arbitrary events, and then learns to predict the link strength between events in the training process, so that the sparsity issues in the retrieval process can be avoided.
90
+
91
+ To this end, as Figure 2 (b) shows, in contrast to the retrieval-based baseline system, we introduce two more modules: (1) An aggregator to obtain event representations from the BERT framework; (2) an inferer to predict the link strength between events based on the event representations.
92
+
93
+ # 4.1 Event Encoding
94
+
95
+ Given an event sequence $X$ , to calculate the event representations and predict the link strength for events within $X$ , GraphBERT first encodes $X$ into a set of token-level distributed representations by taking the 1st- $s_0$ th Transformer layers of BERT as an encoder. Then an aggregator is employed to aggregate the token level representations into event representations.
96
+
97
+ Token Level Representations For an event sequence $X = \{X_{1},\dots ,X_{t + 1}\}$ where $X_{i} = \{x_{1},\ldots ,x_{l_{i}}\}$ is an event within $X$ and with $l_{i}$ tokens, the $s_0$ th Transformer layer of BERT encodes these tokens into contextualized distributed representations $H^{s_0} = \{(h_1^1,\dots,h_{l_1}^1),\dots ,(h_1^{t + 1},\dots,h_{l_{t + 1}}^{t + 1})\}$ where $h_j^i\in \mathbb{R}^{1\times d}$ is the distributed representation of the $j$ th token of event $X_{i}$ . Then we conduct the graph information prediction as well as the predic
98
+
99
+ tion task based on the token representations.
100
+
101
+ Event Level Representations An aggregator module aggregates tokens representation of events derived from the hidden states of BERT (i.e., $H^{s_0}$ ) to obtain the event level representations. For an arbitrary event $X_{i} \in X$ , we employ a multi-head attention operation (Vaswani et al., 2017) to aggregate information from the corresponding token representations $H_{i}^{s_{0}} = (h_{1}^{i},\dots ,h_{l_{i}}^{i})$ and obtain the vector representation of $X_{i}$ . Specifically, we define the query matrix of attention operation as $q_{i} = \frac{1}{l_{i}}\sum h_{l}^{i}$ , and take $H_{i}^{s_{0}}$ as the key matrix as well as the value matrix. Then the representation of $X_{i}$ is calculated as:
102
+
103
+ $$
104
+ \hat{e}_{i} = \operatorname{MultiAttn}\left(q_{i}, H_{i}^{s_{0}}, H_{i}^{s_{0}}\right) \tag{5}
105
+ $$
106
+
107
+ where $\hat{e}_i\in \mathbb{R}^{1\times d}$
108
+
109
+ In this way, we can obtain the representation of all events within $X$ , which we denote as $\hat{E} = \{\hat{e}_1,\dots ,\hat{e}_{t + 1}\}$ , where $\hat{E}\in \mathbb{R}^{(t + 1)\times d}$ is a matrix. Note that through the embedding layer of BERT, position information has been injected into the token representations. Thus $\hat{E}$ carries event order information.
110
+
111
+ Then the event representation matrix $\hat{E}$ is used for predicting the link strength between events. Hence, the performance of link strength prediction can be strongly influenced by the quality of $\hat{E}$ . By deriving $\hat{E}$ from the hidden states of BERT, the abundant language knowledge within BERT can be utilized to obtain the event representations.
112
+
113
+ # 4.2 Link Strength Prediction
114
+
115
+ Given the event representation matrix $\hat{E}$ as node features, we employ an inferer module to predict the connection strength between arbitrary events within $X$ , regardless of whether these events are seen in the training process. The output is a matrix $\hat{A} \in \mathbb{R}^{(t+1) \times (t+1)}$ , where $\hat{A}_{ij}$ models the probability that event $j$ is the subsequent event of event $i$ .
116
+
117
+ We stack $n$ graph attention (GAT) layers (Veličković et al., 2017) for consolidating event features. For an event $X_{i}$ , the GAT layer works on the neighborhood of $X_{i}$ to aggregate information. Since the connections between events are unknown a priori, we set the neighborhood set of event $X_{i}$ as $\mathcal{N}_i = \{X_j\}$ , where $X_{j} \in X, j \neq i$ .
118
+
119
+ Therefore, at the $k$ th graph attention layer, given the representation of the $i$ th event $\hat{e}_i^k$ , we calculate the attention coefficients between other events and derive deep event representation as:
120
+
121
+ $$
122
+ \begin{array}{l} \alpha_{ij} = \operatorname{softmax}_{j,\, j \in \mathcal{N}_i}\left(\operatorname{Relu}\left(u\left[W_{\alpha}\hat{e}_{i}^{k} \,\|\, W_{\alpha}\hat{e}_{j}^{k}\right]\right)\right) \\ \hat{e}_{i}^{k+1} = \sigma\left(\sum_{j \in \mathcal{N}_i} \alpha_{ij} W_{\alpha} \hat{e}_{j}^{k}\right) \tag{6} \end{array}
123
+ $$
124
+
125
+ where $u\in \mathbb{R}^{1\times 2d}$ $W_{\alpha}\in \mathbb{R}^{d\times d}$ are trainable parameters, $|\cdot |.\cdot$ is a concatenation operation. At the first GAT layer, $\hat{e}_i^1$ is initialized by $\hat{e}_i$ derived from the aggregator.
126
+
127
+ After $n$ graph attention operations, we employ a bilinear map to calculate a relation strength score between two events within $X$ based on their deep representations:
128
+
129
+ $$
130
+ \Gamma_{ij} = \hat{e}_{i}^{n} W_{R}\, \mathrm{T}\!\left(\hat{e}_{j}^{n}\right) \tag{7}
131
+ $$
132
+
133
+ where $W_{R}\in \mathbb{R}^{d\times d}$ are learnable parameters, $T(\cdot)$ is the transpose operation. For all $t + 1$ events within $X$ , the relation strength score between arbitrary two events forms a matrix $\Gamma \in \mathbb{R}^{(t + 1)\times (t + 1)}$ , with each element $\Gamma_{ij}$ measuring the relation strength between $X_{i}$ and $X_{j}$ .
134
+
135
+ Then we normalize the relation strength scores using the softmax function:
136
+
137
+ $$
138
+ \hat{A}_{ij} = \operatorname{softmax}_{j}\left(\Gamma_{ij}\right) \tag{8}
139
+ $$
140
+
141
+ After the softmax normalization, $\sum_{j}\hat{A}_{ij} = 1$
142
+
143
+ Hence, with the aggregator and the inferer, GraphBERT can obtain representation and connection strengths for arbitrary events, regardless of whether or not the event is covered by the event graph. Then the predicted adjacency matrix $\hat{A}$ and event representations $\hat{E}$ can be used for prediction, and the process is same as the retrieval-based baseline, as described in Eq.(2)-Eq.(4).
144
+
145
+ # 4.3 Training of Inferer
146
+
147
+ In the training process, we employ a tutor module to supervise the prediction of $\hat{A}$ using the structural information from a prebuilt event graph. Given an event sequence $X$ , the tutor obtains an adjacency matrix $A$ based on the edge weights of the event graph. Formally, the weights of $A$ are initialized as:
148
+
149
+ $$
150
+ A_{ij} = \left\{\begin{array}{ll} W_{ij}, & \text{if } V_{i'} \rightarrow V_{j'} \in R, \\ 0, & \text{otherwise.} \end{array}\right. \tag{9}
151
+ $$
152
+
153
+ where $V_{i'}$ , $V_{j'}$ are nodes in the event graph corresponding to the $i$ th and the $j$ th event of the candidate event sequence. The same as the predicted event adjacency matrix $\hat{A}$ , $A$ is also a $\mathbb{R}^{(t+1) \times (t+1)}$ matrix.
154
+
155
+ We scale $A$ so that each row sums to 1. Therefore, each element of $A$ models the probability that the $j$ th event is the subsequent event of
156
+
157
+ the $i$ th event in $X$ . In the training process, through minimizing the distance between $\hat{A}$ and $A$ , the inferer module is supervised by the tutor to learn to predict the event connection strength based on the event representations.
158
+
159
+ # 4.4 Optimization
160
+
161
+ The overall loss function is defined as:
162
+
163
+ $$
164
+ L = L_{\text{Event Prediction}} + \lambda L_{\text{Graph Reconstruction}} \tag{10}
165
+ $$
166
+
167
+ where $L_{\text{Event Prediction}}$ is a cross-entropy loss measuring the difference between the predicted relatedness score $Y$ and the gold label, $L_{\text{Graph Reconstruction}}$ assesses the difference between $A$ and $\hat{A}$ , and $\lambda$ is an additional hyperparameter for balancing the prediction loss with the graph reconstruction loss.
168
+
169
+ For calculating $L_{\mathrm{Graph~Reconstruction}}$ , we cast both $A$ and $\hat{A}$ as a set of random variables, and employ the KL divergence to measure their difference:
170
+
171
+ $$
172
+ \begin{array}{l} L_{\text{Graph Reconstruction}} = \\ \sum_{i} \mathrm{KL}\left(\text{MultiNomial}(\hat{A}_{i}) \,\|\, \text{MultiNomial}(A_{i})\right) \tag{11} \\ \end{array}
173
+ $$
174
+
175
+ where $i$ denotes the $i$ th row, and $\text{MultiNomial}(\cdot)$ denotes the multinomial distribution.
176
+
177
+ # 5 Experiments
178
+
179
+ We evaluate our approach on two event prediction tasks: the Multiple Choice Narrative Cloze Task (MCNC) (Granroth and Clark, 2016) and the Story Cloze Test (SCT) (Mostafazadeh et al., 2016). We construct an event graph based on the training set of MCNC to train the GraphBERT model, and then adapt the GraphBERT model trained on the MCNC dataset to the SCT dataset, to evaluate whether GraphBERT can predict the link strength between unseen events to enhance the prediction performance.
180
+
181
+ # 5.1 Dataset
182
+
183
+ Multiple Choice Narrative Cloze Task The MCNC task requires the prediction model to choose the most reasonable subsequent event from five candidate events given an event context (Granroth and Clark, 2016). In this task, each event is abstracted to Predicate-GR form (Granroth and Clark, 2016), which represents an event in a structure of {subject, predicate, object, prepositional object}. Following Granroth and Clark (2016), we extract event chains from the New York Times portion of the Gigaword corpus. The detailed statistics of the dataset are shown in Table 1.
184
+
185
+ <table><tr><td></td><td>Training</td><td>Dev.</td><td>Test</td></tr><tr><td>#Documents</td><td>830,643</td><td>103,583</td><td>103,805</td></tr><tr><td>#Event Chains</td><td>140,331</td><td>10,000</td><td>10,000</td></tr><tr><td>#Unique Events</td><td>430,516</td><td>44,581</td><td>47,252</td></tr><tr><td>#Uncovered Events</td><td>0</td><td>24,358</td><td>24,081</td></tr></table>
186
+
187
+ Table 1: Statistics of the MCNC dataset.
188
+
189
+ Story Cloze Test Task The SCT task requires models to select the correct ending from two candidates given a story context. Compared with MCNC, which focuses on abstract events, the stories in SCT consist of concrete events with much more detail. This dataset contains a five-sentence story training set with 98,162 instances, and 1,871 four-sentence story contexts along with a right ending and a wrong ending in the dev. and test dataset, respectively. Because of the absence of wrong endings in the training set, we only use the development and the test dataset, and split the development set into 1,771 instances for finetuning models and 100 instances for development purposes.
190
+
191
+ # 5.2 Construction of Event Graph
192
+
193
+ The event graph is constructed based on the training set of the MCNC dataset. Each event within the training set of MCNC is taken as a node of the event graph, and the edge weights are obtained by calculating the event bigram frequency. Note that, as shown in Table 1, although the events have been processed into a highly abstracted form to alleviate the sparsity, nearly half of the events in the development and test sets of MCNC still remain uncovered by the event graph. In the test process, for retrieval-based methods, given a candidate event sequence with length $t + 1$ , the edge weights for events not covered by the event graph are all set as $1 / (t + 1)$ .
194
+
195
+ # 5.3 Experimental Settings
196
+
197
+ We implement the GraphBERT model using pretrained BERT-base model, which contains 12 Transformer layers. We aggregate the token representations from the 7th Transformer layer of BERT, and merge the updated event representations to the 10th Transformer layer of BERT. The aggregator has a dimension of 768, and contains 12 attention heads. The inferer contains 1 GAT layer. The balance coefficient $\lambda$ equals 0.01. During the training and testing process, we concatenate the elements of the Predicate-GRs to turn the Predicate-GRs into strings, so that the event sequences can conform to the input format of the GraphBERT model. More details are provided in the Appendix.
198
+
199
+ # Baselines for MCNC
200
+
201
+ # Event Pair and Event Chain Based Methods
202
+
203
+ (i) Event-Comp (Granroth and Clark, 2016) calculates the pair-wise event relatedness score using a Siamese network. (ii) PairLSTM (Wang et al., 2017) integrates event order information and pairwise event relations to predict the ending event. (iii) RoBERTa-RF (Lv et al., 2020) enhances the pretrained language model RoBERTa with chain-wise event relation knowledge for making predictions.
204
+
205
+ # Event Graph Based Methods
206
+
207
+ (i) SGNN (Li et al., 2018b) constructs a narrative event evolutionary graph (NEEG) to describe event connections, and proposes a scaled graph neural network to predict the ending event based on structural information retrieved from the NEEG. (ii) HeterEvent (Zheng et al., 2020) encodes events using BERT, and implicitly models the word-event relationship by a heterogeneous graph attention mechanism. (iii) GraphTransformer (Koncel et al., 2019) retrieves structural information from the event graph and introduces an additional graph encoder upon BERT to leverage the structural information. **Pretrained Language Model Based Methods**
208
+
209
+ (i) BERT (Devlin et al., 2019) refers to the BERT-base model finetuned on the MCNC dataset. (ii) $\mathrm{GraphBERT}_{\lambda = 0}$ refers to the GraphBERT model optimized with the balance coefficient $\lambda$ set as 0. Hence, the structural information cannot be incorporated through the graph reconstruction term.
210
+
211
+ # 5.3.1 Settings for SCT
212
+
213
+ To test the generality of GraphBERT, we examine whether GraphBERT can utilize the structural knowledge learned from MCNC-based event graph to guide the SCT task. To make fair comparisons, we also trained the BERT (Devlin et al., 2019), GraphTransformer (Koncel et al., 2019) on the MCNC dataset, then finetuned them on the SCT dataset. In the following sections, we use the subscript "MCNC" to denote the model which has been trained on the MCNC dataset.
214
+
215
+ However, in the finetuning and test process, GraphTransformer still relies on an event graph to provide structural information. To address this issue, we abstract each event in the finetuning set and test set of SCT into the Predicate-GR form, which is the same form as the nodes in the MCNC-based event graph. As a result, structural information for an event in SCT can be retrieved from the MCNC-based event graph using its corresponding Predicate-GR form, once the event is covered by the event graph.
216
+
217
+ In addition to the above-mentioned methods, on the SCT dataset, we also compare GraphBERT with the following event-chain-based baselines:
218
+
219
+ (i) HCM (Chaturvedi et al., 2017) trains a logistic regression model based on contextual semantic features. (ii) ISCK (Chen, 2019) integrates narrative sequence and sentimental evolution information to predict the story ending.
220
+
221
+ # 5.3.2 Overall Results
222
+
223
+ We list the results on MCNC and SCT in Table 2 and Table 3, respectively. From the results on MCNC (Table 2), we can observe that:
224
+
225
+ (1) Compared to event-pair-based EventComp and event-chain-based PairLSTM, event-graph-based methods (i.e. SGNN, HeterEvent, GraphTransformer, and GraphBERT) show better performance. In addition, GraphBERT outperforms event-chain based RoBERTa-RF, though RoBERTa-RF is built upon a much more powerful language model. This confirms that involving event structural information could be effective for this task.
226
+
227
+ (2) Compared to BERT and GraphBERT $_{\lambda=0}$ , graph enhanced models GraphTransformer and GraphBERT further improve the accuracy of script event prediction (T-test; P-Value $< 0.01$ ). This shows that linguistic and structural knowledge can have a complementary effect.
228
+
229
+ (3) Compared to the retrieval-based method GraphTransformer, GraphBERT shows the effectiveness of learning structural information from the event graph (T-test; P-Value $< 0.01$ ). This indicates that GraphBERT is able to learn the structural information from the event graph in the training process, and predict the correct structural information for unseen events in the test process.
230
+
231
+ Results on the SCT dataset (Table 3) show that:
232
+
233
+ (1) Comparing GraphBERT with $\mathrm{BERT}_{\mathrm{MCNC}}$ , $\mathrm{GraphBERT}_{\lambda=0,\mathrm{MCNC}}$ shows that the graph information can also be helpful for the SCT task.
234
+ (2) Though incorporating graph information, the performance of GraphTransformer is close to or inferior to that of BERT on SCT. This could be because of the limited size of the SCT development set, which contains 1,771 samples and might be insufficient to adapt GraphTransformer to the SCT problem. However, GraphBERT shows a $1.3\%$ absolute improvement over BERT, which indicates the efficiency of GraphBERT in predicting the link strength between unseen events for predicting the ending event.
235
+
236
+ <table><tr><td>Methods</td><td>Accuracy(%)</td></tr><tr><td>Random</td><td>20.00**</td></tr><tr><td>EventComp (Granroth and Clark, 2016)</td><td>49.57**</td></tr><tr><td>PairLSTM (Wang et al., 2017)</td><td>50.83**</td></tr><tr><td>SGNN (Li et al., 2018b)</td><td>52.45**</td></tr><tr><td>BERT (Devlin et al., 2019)</td><td>57.35**</td></tr><tr><td>GraphTransformer (Koncel et al., 2019)</td><td>58.53**</td></tr><tr><td>HeterEvent (Zheng et al., 2020)</td><td>58.10**</td></tr><tr><td>GraphBERTλ=0</td><td>57.23**</td></tr><tr><td>RoBERTa-RF (Lv et al., 2020)</td><td>58.66**</td></tr><tr><td>GraphBERT</td><td>60.72</td></tr></table>
237
+
238
+ Table 2: Performance of GraphBERT and baseline methods on the test set of MCNC. Accuracy marked with * means p-value < 0.05 and ** indicates p-value < 0.01 in T-test.
239
+
240
+ <table><tr><td>Methods</td><td>Accuracy(%)</td></tr><tr><td>HCM (Chaturvedi et al., 2017)</td><td>77.6**</td></tr><tr><td>ISCK (Chen, 2019)</td><td>87.6**</td></tr><tr><td>BERT (Devlin et al., 2019)</td><td>88.1*</td></tr><tr><td>BERTMCNC</td><td>88.5*</td></tr><tr><td>GraphTransformerMCNC (Koncel et al., 2019)</td><td>88.9</td></tr><tr><td>HeterEventMCNC (Zheng et al., 2020)</td><td>88.4*</td></tr><tr><td>GraphBERTλ=0,MCNC</td><td>88.3*</td></tr><tr><td>GraphBERTMCNC</td><td>89.8</td></tr></table>
241
+
242
+ Table 3: Model performance on the test set of SCT. Accuracy marked with * means p-value $< {0.05}$ and ** indicates p-value $< {0.01}$ in T-test.
243
+
244
+ # 5.4 Influence of the Accuracy of the Predicted Link Strength
245
+
246
+ We investigate the relationship between the accuracy of the predicted link strengths with the model performance. However, for events in the test set, the golden event graph is unavailable. To address this issue, we split the original training set of MCNC into a new training and evaluating set, containing 120,331 and 20,000 instances, respectively. For each sample, we calculate the Pearson correlation coefficient between the predicted connection strengths and connection strengths derived from the event graph, as well as the relationship between such correlation coefficient and model performance. The results are shown in Figure 3. We observe that, in general, GraphBERT can predict the connection between arbitrary events with reasonable accuracy. Also, the model performance improves as the connection prediction accuracy increases. This confirms that correctly predicting the event connections for unseen events can be helpful for the event prediction process.
247
+
248
+ # 5.5 Influence of the Coverage of the Event Graph
249
+
250
+ We conduct experiments to investigate the specific influence of the sparsity of the event graph on model performance. Based on the original test set of MCNC, we build new test sets with different proportions of uncovered events, and compare the
251
+
252
+ ![](images/9a4f10c055fd44ecccec5b96f8efc2b45b3d24d996b9f360e1ec486797ccfdb3.jpg)
253
+ Figure 3: (a) The distribution of Pearson correlation coefficients between the predicted connection strength and connection strength derived from the event graph. (b) Relationship between correlation coefficient and model performance.
254
+
255
+ ![](images/5691911009e1f5c67f2a2df21d9e7d9cac8183c8a2b5e8a4cd4c4750c6d33adf.jpg)
256
+
257
+ ![](images/3f09ee9ef9992bc7214d0e79890d53cad31ac037185ead92f2b8ad08c0f42e41.jpg)
258
+ Figure 4: The performance of GraphBERT and GraphTransformer under different proportion of uncovered events.
259
+
260
+ performances of the GraphBERT framework with the retrieval-based method GraphTransformer (Koncel et al., 2019) on these test sets. As shown in Figure 4, as the proportion of uncovered events increases from 0 to 1, the performance of GraphTransformer shows a negative trend in general. This is because, for retrieval-based methods, with the increase of sparsity, the availability of structural information decreases. Compared to GraphTransformer, the performance of GraphBERT is more stable. These results indicate that predicting the structural information can be useful for enhancing the performance of event prediction.
261
+
262
+ # 5.6 Case Study
263
+
264
+ Table 4 provides an example of prediction results from different models on the test set of SCT. The event context describes a story that a bear appeared in the campus and policemen came to tranquilize the bear. Given the event context, GraphBERT is able to choose correct ending $\mathrm{E}_1$ The bear fell asleep, while GraphTransformer chooses the incorrect ending $\mathrm{E}_2$ The bear became very violent.
265
+
266
+ To correctly predict the story ending, a model should understand the relationship between gave a tranquilizer and fell asleep. However, event gave a tranquilizer is not covered by the event graph. Hence, the retrieval-based method GraphTransformer is unable to obtain structural information from the event graph. On the other hand, in the event graph, there is a directed edge from a node obj. sedated to node subj. slept. This indicates that,
267
+
268
+ <table><tr><td>Event Context</td><td>Candidate Subsequent Event</td><td>Model</td></tr><tr><td rowspan="2">A: I heard that my school&#x27;s campus had been closed.
269
+ B: The message said there was a bear on the grounds!
270
+ C: The police had to come and help get the bear away.
271
+ D: They gave the bear a tranquilizer.</td><td>E1: The bear fell asleep. (√)</td><td>GraphBERT</td></tr><tr><td>E2: The bear became very violent. (×)</td><td>GraphTransformer</td></tr></table>
272
+
273
+ Table 4: An example of event predictions made by GraphTransformer and GraphBERT on the SCT dataset.
274
+
275
+ GraphBERT can learn the structural knowledge from the MCNC-based event graph, and predict the connection between gave a tranquilizer and fell asleep for instances in the SCT dataset.
276
+
277
+ # 6 Discussion
278
+
279
+ The GraphBERT model employs a structure variable $\hat{A}$ to capture the "is_next_event" relationship between events. By introducing more parallel structural variables $\{\hat{A}^1,\dots ,\hat{A}^k\}$ , it can be extended to simultaneously learn multiple kinds of event relationships, such as temporal or causal relationships. Furthermore, previous research demonstrates that graph-structured relationships extensively exist between other semantic units, such as sentences (Yasunaga et al., 2017), or even paragraphs (Sonawane and Kulkarni, 2014). However, similar to the situation with the event graph, it would be impractical to construct knowledge graphs that cover all possible connection relationships between all the sentences or paragraphs. This restricts the applicability of retrieval-based methods in these situations. On the contrary, our generative approach suggests a potential solution by learning the connection relationship from a graph-structured knowledge base of limited size, and then generalizing to unseen cases.
280
+
281
+ # 7 Related Work
282
+
283
+ The investigation of scripts dates back to 1970's (Minsky, 1974; Schank, 1975). The script event prediction task models the relationships between abstract events. Previous studies propose to model the pair-wise relationship (Chambers, 2008; Jans et al., 2012; Granroth and Clark, 2016) or event order information (Pichotta and Mooney, 2016; Pichotta, 2016; Wang et al., 2017) for predicting the subsequent event. Li et al. (2018b) and Lv et al. (2019) propose to leverage the rich connection between events using graph neural network and attention mechanism, respectively.
284
+
285
+ Different from script event prediction, the story cloze task (Mostafazadeh et al., 2016) focuses on concrete events. Therefore, it requires prediction models to learn commonsense knowledge for un
286
+
287
+ derstanding the story plot and predicting the ending. To this end, Li et al. (2018a) and Guan (2019) propose to combine context clues with external knowledge such as KGs. Li et al. (2019) finetune pretrained language models to solve the task. Compared to their works, our approach can use both the language knowledge enriched in BERT to promote the comprehension of event context, and the structural information from event graph to enhance the modeling of event connections.
288
+
289
+ A recent line of work has been engaged in combining the strength of Transformer based models with graph structured data. To integrate KG with the language representation model BERT, Zhang et al. (2019) encode the KG with a graph embedding algorithm TransE (Bordes et al., 2013), and take the representations of entities in the KG as input of their model. However, this line of work only linearizes KGs to adapt to the input of BERT. Graph structure is not substantially integrated with BERT. Guan (2019) and Koncel et al. (2019) propose retrieval-based methods to leverage the structural information of KG. However, in the event prediction task, the diversity of event expression challenges the coverage of the event graph, and prevents us from simply retrieving events in the test instances from the event graph. We propose to integrate the graph structural information with BERT through a predictive method. Compared to retrieval-based methods, our approach is able to learn the structural information of the event graph and generate the structural information of events to avoid the unavailability of structural information in test instances.
290
+
291
+ # 8 Conclusion
292
+
293
+ We devised a graph knowledge enhanced BERT model for the event prediction task. In addition to the BERT structure, GraphBERT introduces a structured variable to learn structural information from the event graph, and model the relationship between the event context and the candidate subsequent event. Compared to retrieval-based methods, GraphBERT is able to predict the link strength between all events, thus avoiding the (inevitable) sparsity of event graph. Experimental results on
294
+
295
+ MCNC and SCT task show that GraphBERT can improve the event prediction performances compared to state-of-the-art baseline methods. In addition, GraphBERT could also be adapted to other graph-structured data, such as knowledge graphs.
296
+
297
+ # 9 Acknowledgments
298
+
299
+ We thank the anonymous reviewers for their constructive comments, and gratefully acknowledge the support of the New Generation Artificial Intelligence of China (2020AAA0106501), and the National Natural Science Foundation of China (62176079, 61976073).
300
+
301
+ # References
302
+
303
+ Antoine Bordes, Nicolas Usunier, Alberto Garcia-Duran, Jason Weston, and Oksana Yakhnenko. 2013. Translating embeddings for modeling multi-relational data. Advances in neural information processing systems, 26.
304
+ Chambers. 2008. Unsupervised learning of narrative event chains. In Proceedings of the Association for Computational Linguistics-08: HLT, pages 789-797.
305
+ Snigdha Chaturvedi, Haoruo Peng, Dan Roth, and nbd. 2017. Story comprehension for predicting what happens next. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 1603-1614.
306
+ Chen. 2019. Incorporating structured commonsense knowledge in story completion. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 6244-6251.
307
+ Kevin Clark, Urvashi Khandelwal, Omer Levy, and Christopher D Manning. 2019. What does bert look at? an analysis of berts attention. In Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 276-286.
308
+ Andy Coenen, Emily Reif, Ann Yuan, Been Kim, Adam Pearce, Fernanda Viégas, and Martin Wattenberg. 2019. Visualizing and measuring the geometry of bert. In Proceedings of the 33rd International Conference on Neural Information Processing Systems, pages 8594-8603.
309
+ Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Annual Meeting of the North American Chapter of the Association for Computational Linguistics, pages 4171-4186.
310
+ Mark Granroth and Stephen Clark. 2016. What happens next? event prediction using a compositional
311
+
312
+ neural network model. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 30.
313
+ Guan. 2019. Story ending generation with incremental encoding and commonsense knowledge. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 6473-6480.
314
+ Bram Jans, Steven Bethard, Ivan Vulic, and Marie Francine Moens. 2012. Skip n-grams and ranking functions for predicting script events. In Proceedings of the 13th Conference of the European Chapter of the Association for Computational Linguistics, pages 336-344.
315
+ Ganesh Jawahar, Benoit Sagot, Djamé Seddah, Samuel Unicomb, Gerardo Iniguez, Marton Karsai, Yannick Léo, Marton Karsai, Carlos Sarraute, Éric Fleury, et al. 2019. What does bert learn about the structure of language? In 57th Annual Meeting of the Association for Computational Linguistics (ACL), Florence, Italy.
316
+ Rik Koncel, Dhanush Bekal, Yi Luan, Mirella Lapata, and Hannaneh Hajishirzi. 2019. Text generation from knowledge graphs with graph transformers. In Proceedings of the 2019 Annual Meeting of the North American Chapter of the Association for Computational Linguistics, pages 2284-2293.
317
+ Qian Li, Ziwei Li, Jin-Mao Wei, Yanhui Gu, Adam Jatowt, and Zhenglu Yang. 2018a. A multi-attention based neural network with external knowledge for story ending predicting task. In Proceedings of the 27th International Conference on Computational Linguistics, pages 1754–1762.
318
+ Zhongyang Li, Xiao Ding, Ting Liu, et al. 2018b. Constructing narrative event evolutionary graph for script event prediction. In International Joint Conference on Artificial Intelligence 2018, pages 4201-4207. AAAI Press.
319
+ Zhongyang Li, Xiao Ding, Ting Liu, et al. 2019. Story ending prediction by transferable bert. arXiv preprint arXiv:1905.07504.
320
+ Shangwen Lv, Wanhui Qian, Longtao Huang, Jizhong Han, and Songlin Hu. 2019. Sam-net: Integrating event-level and chain-level attentions to predict what happens next. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 6802-6809.
321
+ Shangwen Lv, Fuqing Zhu, and Songlin Hu. 2020. Integrating external event knowledge for script learning. In Proceedings of the 28th International Conference on Computational Linguistics, pages 306-315.
322
+ Marvin Minsky. 1974. A framework for representing knowledge.
323
+ Nasrin Mostafazadeh, Nathanael Chambers, Xiaodong He, Devi Parikh, Dhruv Batra, Lucy Vanderwende, Pushmeet Kohli, and James Allen. 2016. A corpus and cloze evaluation for deeper understanding
324
+
325
+ of commonsense stories. In Proceedings of the 2016 Annual Meeting of the North American Chapter of the Association for Computational Linguistics, pages 839-849.
326
+
327
+ Pichotta. 2016. Using sentence-level lstm language models for script inference. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 279-289.
328
+
329
+ Karl Pichotta and Raymond Mooney. 2016. Statistical script learning with recurrent neural networks. In Proceedings of the Workshop on Uphill Battles in Language Processing: Scaling Early Achievements to Robust Methods, pages 11-16.
330
+
331
+ Schank. 1975. Scripts, plans, and knowledge. In Proceedings of the 4th international joint conference on Artificial intelligence-Volume 1, pages 151-157.
332
+
333
+ Sheetal S Sonawane and Parag A Kulkarni. 2014. Graph based representation and analysis of text document: A survey of techniques. International Journal of Computer Applications, 96(19).
334
+
335
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information processing systems, pages 5998-6008.
336
+
337
+ Petar Velicković, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Lio, and Yoshua Bengio. 2017. Graph attention networks. arXiv preprint arXiv:1710.10903.
338
+
339
+ Zhongqing Wang, Yue Zhang, Ching Yun Chang, and nbd. 2017. Integrating order information and event relation for script event prediction. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 57-67.
340
+
341
+ Michihiro Yasunaga, Rui Zhang, Kshitijh Meelu, Ayush Pareek, Krishnan Srinivasan, and Dragomir Radev. 2017. Graph-based neural multi-document summarization. In Proceedings of the 21st Conference on Computational Natural Language Learning (CoNLL 2017), pages 452-462.
342
+
343
+ Zhengyan Zhang, Xu Han, Zhiyuan Liu, Xin Jiang, Maosong Sun, and Qun Liu. 2019. Ernie: Enhanced language representation with informative entities. arXiv preprint arXiv:1905.07129.
344
+
345
+ Jianming Zheng, Fei Cai, Yanxiang Ling, and Honghui Chen. 2020. Heterogeneous graph neural networks to predict what happen next. In Proceedings of the 28th International Conference on Computational Linguistics, pages 328-338.
346
+
347
+ # 10 Experimental Settings
348
+
349
+ # 10.1 Training Details
350
+
351
+ To conform to the input format of BERT, for an event described in the Predicate-GR form {subject,
352
+
353
+ ![](images/727bfd3298abd03393b6ea42c75a3dd5b21e6074e868eeaf8df96c38520e669f.jpg)
354
+ Figure 5: The performance of model trained with different balance coefficient $\lambda$ .
355
+
356
+ predicate, object, prepositional object\}, we first concatenate each element within the predicate-GR into a string "subject predicate object prepositional object", so that an event described in a structured form is turned into a string. Then for satisfying the requirement of BERT, the candidate event sequence is further preprocessed into the form of:
357
+
358
+ $$
359
+ [\text{CLS}]\; e_{1}\; [\text{SEP}] \dots e_{t}\; [\text{SEP}]\; \text{candidate}\; [\text{SEP}] \tag{12}
360
+ $$
361
+
362
+ On the MCNC dataset, the GraphBERT model is trained for 3 epochs, with a batch size of 64, and a learning rate of 2e-5. While during the finetuning process on SCT, GraphBERT is optimized with a batch size of 16, and a learning rate of 1e-5, with 5 epochs.
363
+
364
+ # 10.2 Searching for the Balance Coefficient
365
+
366
+ In this paper, the objective function is composed of two components. Through minimizing the graph reconstruction loss, the model learns to model the bigram event adjacency patterns, while through minimizing the prediction loss, the model is trained to choose the correct ending given an event context. These two components are balanced with a coefficient $\lambda$ .
367
+
368
+ To investigate the effect of the balance coefficient, we compare the prediction accuracy of the GraphBERT model trained with different $\lambda$ and show the results in Figure 5. From these we observe that the prediction accuracy increases as the balance coefficient increases from 0 to 0.1. This is because the additional event graph structure information is helpful for the event prediction task. However, as $\lambda$ exceeds 0.5, the model performance starts to decrease. This is because the overemphasis of the graph reconstruction loss would in turn decrease the model performance.
369
+
370
+ <table><tr><td>(4, 10)</td><td>(5, 10)</td><td>(6, 10)</td><td>(7, 10)</td><td>(8, 10)</td><td>(9, 10)</td></tr><tr><td>58.76</td><td>60.28</td><td>60.57</td><td>60.72</td><td>60.28</td><td>60.01</td></tr></table>
371
+
372
+ Table 5: Influence of start layer and merge layer on model performance.
373
+
374
+ <table><tr><td>Model</td><td>Prediction Accuracy (%)</td></tr><tr><td>BERT</td><td>57.35</td></tr><tr><td>GraphBERT</td><td>60.72</td></tr><tr><td>RoBERTa</td><td>61.19</td></tr><tr><td>GraphRoBERTa</td><td>62.81</td></tr></table>
375
+
376
+ Table 6: Performance of the event graph knowledge enhanced RoBERTa model (Graph-RoBERTa) on the MCNC dataset.
377
+
378
+ # 10.3 Searching of Start and Merge Layer in BERT
379
+
380
+ Different transformer layers of BERT tend to concentrate on different semantic and syntactic information (Clark et al., 2019; Coenen et al., 2019). Therefore, which layer of BERT is selected to start integrating event graph knowledge, and which layer is selected to merge the graph enhanced event representations, can affect the performance of the model. We study this effect in two ways: first, we fix the start layer and change the merge layer; second, we fix the gap between the start and merge layers, and change the start layer. Results are shown in Table 5, where the tuple $(n_1, n_2)$ denotes the (start, merge) layer. From the results we observe that, under the same gap between the merge and start layers, employing the 7th transformer layer of BERT as the start layer achieves the best result, while setting the merge-start gap as 2 is more efficient than other choices. Interestingly, Jawahar et al. (2019) find that syntactic features can be well captured in the middle layers of BERT, especially in layers 7-9. This indicates that the middle layers of BERT focus more on sentence level information, and implicitly supports the reasonableness of choosing the 7th and 10th transformer layers of BERT as the start and merge layers.
381
+
382
+ # 11 Enhancing Different Kinds of Pretrained Transformer-based Pretrained Language Models with Event Graph Knowledge
383
+
384
+ In this paper, we propose the GraphBERT framework, which enhances the transformer-based pretrained language model BERT with event graph knowledge through an additional structural variable $\hat{A}$ . We argue that, using the structural variable, we can also equip other transformer-based pretrained language models, such as RoBERTa, with the event
385
+
386
+ graph knowledge, and then enhance the event prediction process. This could be achieved by adapting the aggregator, inferer and merger modules to other transformer-based frameworks.
387
+
388
+ In the above-mentioned manner, we implemented a GraphRoBERTa model and examined its performance on the MCNC dataset. The results are shown in Table 6. We observe that, compared with BERT, RoBERTa and GraphRoBERTa show better performance. This is because, during the pretraining process, RoBERTa can acquire more abundant linguistic knowledge for understanding the events through the dynamic masked token prediction mechanism. Moreover, the comparisons between GraphBERT and BERT, and between GraphRoBERTa and RoBERTa, show the effectiveness of our approach in incorporating event graph knowledge into multiple prevailing transformer-based pretrained language models, to consistently enhance the performance of event prediction.
agraphenhancedbertmodelforeventprediction/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:01838e13d0684bc20d994fd4184819d8d8a424b1e078fff5bc2b3b978901d400
3
+ size 338538
agraphenhancedbertmodelforeventprediction/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f6f0147bc729908a0cad91fb34ddefaea36747564209bec4ed06889c8dcc4594
3
+ size 434185
alignedweightregularizersforpruningpretrainedneuralnetworks/e672d957-667e-4af7-808d-b371f76f81f2_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b089645b05af3499dbf1f1ae4f693ff48835752a9c32cdedbd1942b5f130c828
3
+ size 69731
alignedweightregularizersforpruningpretrainedneuralnetworks/e672d957-667e-4af7-808d-b371f76f81f2_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8bd5b8c96a3824b4c694e68f1f79c80309bbd2bddb922d64e4bf7079f76471c8
3
+ size 87572
alignedweightregularizersforpruningpretrainedneuralnetworks/e672d957-667e-4af7-808d-b371f76f81f2_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d26b12fb03de65150868f2f4e00291a2933480f469f25d2642aa6da899808439
3
+ size 1167330
alignedweightregularizersforpruningpretrainedneuralnetworks/full.md ADDED
@@ -0,0 +1,265 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Aligned Weight Regularizers for Pruning Pretrained Neural Networks
2
+
3
+ James O'Neil and Sourav Dutta and Haytham Assem
4
+ Huawei Research Center, Dublin, Ireland
5
+
6
+ {james.o.neil,sourav.dutta2,haytham.assem}@huawei.com
7
+
8
+ # Abstract
9
+
10
+ While various avenues of research have been explored for iterative pruning, little is known what effect pruning has on zero-shot test performance and its potential implications on the choice of pruning criteria. This pruning setup is particularly important for cross-lingual models that implicitly learn alignment between language representations during pretraining, which if distorted via pruning, not only leads to poorer performance on language data used for retraining but also on zero-shot languages that are evaluated. In this work, we show that there is a clear performance discrepancy in magnitude-based pruning when comparing standard supervised learning to the zero-shot setting. From this finding, we propose two weight regularizers that aim to maximize the alignment between units of pruned and unpruned networks to mitigate alignment distortion in pruned cross-lingual models and perform well for both non zero-shot and zero-shot settings. We provide experimental results on cross-lingual tasks for the zero-shot setting using XLM-RoBERTa<sub>Base</sub>, where we also find that pruning has varying degrees of representational degradation depending on the language corresponding to the zero-shot test set. This is also the first study that focuses on cross-lingual language model compression.
11
+
12
+ # 1 Introduction
13
+
14
+ Deep neural networks (DNNs) have grown increasingly large in the recent years. This has led to models requiring more storage requirements, more resources for training and inference (e.g., GPUs and TPUs), longer compute times and larger carbon footprints. This is largely due to the rise of masked self-supervised learning (SSL) which trains DNNs (e.g., Transformers in NLP) on a large collection of samples that do not have task labels but instead use a subset of the inputs as labels. Given the aforementioned challenges, it has become more difficult for machine learning practitioners to use these SSL
15
+
16
+ pretrained models for fine-tuning on downstream tasks. While training tricks such as effective batch sizes, gradient accumulation and dynamic learning rate schedules (Howard and Ruder, 2018) have improved the efficiency of fine-tuning DNNs under resource constraints, it can still come at a cost, e.g. gradient accumulation leads to less updates.
17
+
18
+ Pruning (LeCun et al., 1990; Reed, 1993) is a type of model compression method (Bucilua et al., 2006) that aims to address these shortcomings by zeroing out a subset of weights in the DNN, while maintaining performance close to the original model. Retraining is often carried out directly after each pruning step to recover from pruning induced performance drops. This process is referred to as iterative pruning. Although, iterative pruning has been extensively studied in the SSL setting (Hassibi and Stork, 1993; Han et al., 2016; Ding et al., 2018) and the transfer learning setting (Molchanov et al., 2016; Gordon et al., 2020; Sanh et al., 2020), little is known about pruning DNNs in the zero-shot setting<sup>1</sup> where a model is required to make predictions on a set of samples from classes that are unobserved during training. One salient example is pretrained cross-lingual language models (XLMs) (Conneau and Lample, 2019; Conneau et al., 2020a) whereby the model is trained with a masked/translation language model (MLM/TLM) objective to predict tokens for a large set of different languages whereby the objective forces the XLM model to learn similar representations for different languages. After cross-lingual pretraining, the model is further fine-tuned to a downstream task in one language (e.g., English) and then evaluated on different languages in the zero-shot setting (e.g., Spanish, French, Chinese, etc.). In this context, applying current pruning methods can damage the
19
+
20
+ XLM cross-lingual alignment that has been learned during pretraining. Ideally, we would aim to prune XLMs in such a way that avoids this alignment distortion which effects the zero-shot performance of pruned XLMs. Additionally, overfitting to the language used for fine-tuning becomes more of an issue due to the progressive reduction in parameters throughout iterative pruning as the remaining weights are relatively large, moving away from an "aligned" XLM state.
21
+
22
+ This is an important problem to address as the application of large pretrained models in the zero-shot-setting for natural language and other modalities (e.g. images and audio) is of practical importance e.g., using XLMs in production for multiple languages by only requiring annotations in a single language for fine-tuning, making predictions on unseen classes at test time from pretrained visual representations (Bucher et al., 2017) using only semantic descriptions (i.e., label similarity to known classes) or zero-shot predictions in pretrained multimodal models such as CLIP (Radford et al., 2021).
23
+
24
+ Hence, this work addresses the alignment distortion pruning problem by introducing AlignReg, a class of weight regularizers for magnitude-based pruning that force pruned models to have parameters that point in a similar direction or have a similar distribution to the parameters of the original pretrained network. To our knowledge, this is the first study on how iteratively pruned models perform in the zero-shot setting and how the solution differs from solutions found in the non-zero shot setting. We believe our findings have a strong practical implication as well-established pruning criteria may not be suitable given the observed discrepancy between zero-shot performance and the typically reported non-zero shot performance. Moreover, our proposed weight regularizer improves overall pruning generalization in zero-shot cross-lingual transfer. Below, we summarize our contributions.
25
+
26
+ - The first analysis of pruning cross-lingual models, how this effects zero-shot cross-lingual transfer and performance differences to pruning in the SSL setup.
27
+ - A weight regularizer that mitigates alignment distortion by minimizing the layer-wise Frobenius norm or unit similarity between the pruned model and unpruned model, avoiding overfitting to single language task fine-tuning.
28
+ - A post-analysis of weight distributions after
29
+
30
+ pruning and how they differ across module types in Transformers.
31
+
32
+ # 2 Related Work
33
+
34
+ Below we describe regularization-based pruning, other non-magnitude based pruning and how masked language modeling (MLM) implicitly learns to align cross-lingual representations.
35
+
36
+ Regularization-based pruning. Pruning can be achieved by using a weight regularizer that encourages network sparsity. Three well-established regularizers are $L_{0}$ (Louizos et al., 2018), $L_{1}$ regularization (Liu et al., 2017; Ye et al., 2018) and the commonly used $L_{2}$ regularization for weight sparsity (Han et al., 2015, 2016). Wang et al. have proposed an $L_{2}$ regularizer that increases in influence throughout retraining and shows the increasing regularization improves pruning performance. For structured pruning where whole blocks of weights are removed, Group-wise Brain Damage (Lebedev and Lempitsky, 2016) and SSL (Wen et al., 2016) propose to use Group LASSO (Yuan and Lin, 2006) to learn structured solutions.
37
+
38
+ Importance-based pruning. Magnitude-based pruning (MBP) relies on the assumption that weight or gradient magnitudes have correlation with its importance to the overall output of the network. Mozer and Smolensky instead use a learnable gating mechanism that approximates layer importance, finding that weight magnitudes reflect importance statistics. To measure weight importance as the difference in loss between pruned and unpruned network, LeCun et al. approximate this difference with a Taylor series up to the second order. This involves the product of the gradient and weight magnitude in the 1st term and an approximation of the Hessian and the square of the weight magnitude for the second term. However, computing the Hessian and even its approximations (LeCun et al., 1990; Hassibi and Stork, 1993; Dong et al., 2017; Wang et al., 2019; Singh and Alistarh, 2020) can significantly slow down retraining. In our work, we avoid the requirement of computing the Hessian or approximations thereof, as it is not scalable for models such as XLM-R (Conneau et al., 2020a). Park et al. have extended MBP to block approximations to avoid pruning lowest weight magnitudes that may be connected to weights in adjacent layers that have high weight magnitude. Lee et al. have provided a method to automatically choose the sparsity of layers by using the rescaled version
39
+
40
+ of weight magnitude to incorporate the model-level distortion incurred by pruning.
41
+
42
+ Implicit Alignment in Pretrained MLMs In context of multi-task learning, Chen et al. (2020) minimize the mean squared error between pretrained weights and weights being learned for a set of different source tasks to avoid catastrophic forgetting in the continual learning setting. Conneau et al. (2020b) have found that multilingual MLM (i.e training with an MLM objective with concatenated text for multiple languages) naturally leads to models with strong cross-lingual transfer capabilities. Additionally, they find that this is also found for monolingual models that do not share vocabulary across monolingual corpora and the only requirement is that weight sharing is used in the top layers of the multi-lingual encoder. In the context of our work, we want to bias our fine-tuned and iteratively pruned model to have similar geometric properties and symmetries to these pretrained MLMs to preserve zero-shot cross-lingual transfer.
43
+
44
+ # 3 Methodology
45
+
46
+ In this section, we describe how our proposed AlignReg weight regularizers can improve pruning performance in both supervised learning and zero-shot pruning settings. We focus on two regularizers, namely, a neuron correlation-based regularizer (cosine-MBP) and Frobenius layer-norm regularizer (frobenius-MBP).
47
+
48
+ Let $\mathcal{D} := \{X_i, y_i\}_{i=1}^D$ where each $X_i$ of $D$ training samples consists of a sequence of vectors $X_i := (x_1, \ldots, x_n)$ and $\boldsymbol{x}_i \in \mathbb{R}^d$ (e.g., $d = 512$ ). For structured prediction (e.g., NER and POS), $y_i \in \mathbb{R}^{n \times c}$ and for single and pairwise sentence classification, $y_i \in \mathbb{R}^c$ where $c$ is the number of classes. Let $\theta = (\theta_1, \ldots, \theta_L)$ be the parameters of a pretrained network $f$ with $L$ layers, where $\theta_l$ refers to the parameters, including weight matrix $\mathbf{W}_l$ and bias $b_l$ , at layer $l$ . Let $f_{\tilde{\theta}}$ be a network with parameters $\tilde{\theta}$ consisting of weights $\tilde{\mathbf{W}}_l \in \mathbb{R}^{N_{l-1} \times N_l}$ and bias $\tilde{\boldsymbol{b}}_l \in \mathbb{R}^{N_l}$ where $N_l$ is the number of units in the $l$ -th layer. Here, $\tilde{\mathbf{W}}_l := \mathbf{W}_l \mathbf{M}_l$ where $\mathbf{M}$ is the pruned mask. For MBP (Karnin, 1990) we remove weights of $\mathbf{W}_l$ , $\forall l \in L$ with the smallest absolute weight magnitude until a specified percentage $p$ of weights are removed. Note that this is a layer-wise process and requires the pruned weights to be masked with $\mathbf{M}_l$ which has 0 entries corresponding to weights to be pruned and 1 entries for unpruned weights
49
+
50
+ $\mathbf{W}_l$ . Global MBP can also be used whereby the weights $\{\mathbf{W}_l\}_{l=1}^L$ are first vectorized and concatenated prior to choosing $p$ lowest weight magnitudes. Unlike layer-wise MBP, the percentage of weights removed in each layer can vary for global-MBP. Typically, weight regularization is used with MBP to encourage weight sparsity. Thus the objective for iterative pruning can be expressed as,
51
+
52
+ $$
53
+ \mathcal {L} _ {\theta} := \frac {1}{D} \sum_ {i = 1} ^ {D} \ell_ {c e} \left(f _ {\tilde {\theta}} \left(\mathbf {X} _ {i}\right), \boldsymbol {y} _ {i}\right) + \lambda | | \tilde {\theta} | | _ {0} \tag {1}
54
+ $$
55
+
56
+ where $\lambda$ controls the influence of the weight magnitude regularization. We now describe our proposed AlignReg.
57
+
58
+ # 3.1 AlignReg - Pruning-Aware Regularization
59
+
60
+ AlignReg can be used to align weights unit-wise or layer-wise between unpruned and pruned networks. We initially discuss the cosine-MBP regularizer.
61
+
62
+ cosine-MBP aims to preserve the inherent crosslingual alignment, during iterative pruning, by minimizing the angle between parameter vectors of the same unit in the pruned and unpruned network. The intuition is that cross-lingual alignment relies more on parameter vector direction than vector magnitudes. Moreover, as the network is being pruned, the weights will consequently change weight magnitude to account for the information loss. To apply AlignReg to linear layers within Transformers, we compute the pairwise cosine similarity between pairs of pruned weights $\tilde{\mathbf{W}}_l\subset \tilde{f}$ and unpruned weights $\mathbf{W}\subset f$ for all $l$ -th layers. For $\mathbf{W}_l\in \mathbb{R}^{N_{l - 1}\times N_l}$ of the $l$ -th layer, the average weight correlation is
63
+
64
+ $$
65
+ \rho \left(\tilde {\mathbf {W}} _ {l}, \mathbf {W} _ {l}\right) = \frac {1}{N _ {l}} \sum_ {i = 1} ^ {N _ {l}} \frac {\left| \mathbf {W} _ {l i} ^ {\top} \tilde {\mathbf {W}} _ {l i} \right|}{\left| \left| \mathbf {W} _ {l i} \right| \right| _ {2} \left| \left| \tilde {\mathbf {W}} _ {l i} \right| \right| _ {2}} \tag {2}
66
+ $$
67
+
68
+ where $\mathbf{W}_{li}$ is $i$ -th column of the matrix corresponding to the $i$ -th unit of the $l$ -th layer. Intuitively, $\rho (\mathbf{W}_l,\tilde{\mathbf{W}}_l)$ is the average cosine similarity between weight vectors of the same unit at the $l$ -th layer of the pruned and unpruned network. Adding AlignReg to the objective results in Equation (3),
69
+
70
+ $$
71
+ \mathcal {L} _ {\theta} := \ell_ {c e} \left(f _ {\tilde {\theta}} (\mathbf {X}), \boldsymbol {y}\right) - \frac {\lambda}{L} \sum_ {l} ^ {L} \rho \left(\tilde {\mathbf {W}} _ {l}, \mathbf {W} _ {l}\right) \tag {3}
72
+ $$
73
+
74
+ where $\lambda \in [0,\infty)$ controls the importance of AlignReg relative to the main cross-entropy loss $\ell_{ce}(\cdot ,\cdot)$ . The gradient of the loss w.r.t to $\theta$ is then
75
+
76
+ # Algorithm 1: AlignReg Pruning
77
+
78
+ 1: Input: Weight tensors $\mathbf{W}_1, \ldots, \mathbf{W}_L$ of a finetuned network, $p$ percentage of weights to remove per layer
79
+ 2: Output: Pruned weight tensors $\tilde{\mathbf{W}}_1, \ldots, \tilde{\mathbf{W}}_L$
80
+ 3: for $l = 1, \dots, L$ do
81
+ 4: Compute $\rho (\tilde{\mathbf{W}}_l,\mathbf{W}_l)$ with Eq. 2.
82
+ 5: Set $\tilde{w}_{s_l}$ as the $s_l$-th smallest element of $|\mathbf{W}_l|$
83
+ 6: Set $\mathbf{M}_l\gets \mathbb{1}\{\mathbf{W}_l - \tilde{w}_{s_l}\geq 0\}$
84
+ 7: Set $\tilde{\mathbf{W}}_l\gets \mathbf{M}_l\odot \mathbf{W}_l$
85
+ 8: end for
86
+ 9: Compute $\mathcal{L}_{\theta}$ according to Eq.3
87
+
88
+ expressed as equation (4),
89
+
90
+ $$
91
+ \nabla_ {\theta} \mathcal {L} _ {\theta} := \nabla_ {\tilde {\theta}} \ell_ {c e} \left(f _ {\tilde {\theta}} (\mathbf {X}), \boldsymbol {y}\right) - \frac {\lambda}{L} \sum_ {l} ^ {L} \frac {\partial \rho \left(\tilde {\mathbf {W}} _ {l} , \mathbf {W} _ {l}\right)}{\partial \tilde {\mathbf {W}} _ {l}} \tag {4}
92
+ $$
93
+
94
+ where $\frac{\partial\rho(\tilde{\mathbf{W}}_l,\mathbf{W}_l)}{\partial\tilde{\mathbf{W}}_l}$ is a function of the 2-norm of the matrices in $\mathbf{W}_l$ . For the element $\mathbf{W}_{l,(i,j)}$ of $i$ -th row and $j$ -th column in $\mathbf{W}_l$ , we have
95
+
96
+ $$
97
+ \begin{array}{l} \frac {\partial \rho (\tilde {\mathbf {W}} _ {l}, \mathbf {W} _ {l})}{\partial \tilde {\mathbf {W}} _ {l, (i, j)}} = \frac {1}{N _ {l}} \sum_ {j = 1} ^ {N _ {l}} \left(\operatorname {sign} \left(\mathbf {W} _ {l, (j)} ^ {\top} \tilde {\mathbf {W}} _ {l, (j)}\right) \right. \tag {5} \\ \left. \left[ \frac {\tilde {\mathbf {W}} _ {l, (i, j)}}{\left\| \mathbf {W} _ {l, (j)} \right\| _ {2} \left\| \tilde {\mathbf {W}} _ {l, (j)} \right\| _ {2}} - \frac {\mathbf {W} _ {l, (i, j)} \, \mathbf {W} _ {l, (j)} ^ {\top} \tilde {\mathbf {W}} _ {l, (j)}}{\left\| \mathbf {W} _ {l, (j)} \right\| _ {2} ^ {3} \left\| \tilde {\mathbf {W}} _ {l, (j)} \right\| _ {2}} \right] \right) \\ \end{array}
98
+ $$
99
+
100
+ where $\mathbf{W}_{l,(j)}$ and $\tilde{\mathbf{W}}_{l,(j)}$ are $j$ -th column in $\mathbf{W}_l$ and $\tilde{\mathbf{W}}_l$ , respectively. Thus, this regularization favors solutions with high cosine similarity between units of pruned and unpruned networks. We also consider a layer-wise $\rho (\mathbf{W},\tilde{\mathbf{W}})$ that relaxes the unit-level alignment to whole layers. This is partially motivated due to the fact neural networks can exhibit similar output activation behavior even when neuron weights have been permuted within the layer (Brea et al., 2019). To perform this we simply apply Equation (2) with vectorized weights $\rho (\mathrm{vec}(\tilde{\mathbf{W}}_l),\mathrm{vec}(\mathbf{W}_l))$ and the subsequent partial derivatives in Equations (4) and (5) are applied for updating $\tilde{\mathbf{W}}_l$ . In our experiments we did not see a significant difference using vectorized weights and thus use unit-wise cosine similarity.
101
+
102
+ Algorithm 1 shows how AlignReg is applied for a single mini-batch update during an iterative pruning epoch.
103
+
104
+ Relaxing Unit-Wise AlignReg To A Layer-Wise Frobenius Distortion Formulation Thus far we have described the application of cosine similarity as a measure of similarity between unpruned and
105
+
106
+ pruned weights of the same units. However, this may be a strict constraint, particularly at high compression rates where the remaining weights for a unit are forced to have higher norms to allow zeroed weights. Hence, an alternative measure is the layer-wise Frobenius norm (Frobenius-MBP) regularizer based on the difference between weights $||\mathbf{W} - \tilde{\mathbf{W}} ||_F$ . MBP itself can be viewed in terms of minimizing the Frobenius distortion (Han et al., 2016; Dong et al., 2017) as $\min_{\mathbf{M}:||\mathbf{M}||_0 = p}||\mathbf{W} - \mathbf{M}\odot \mathbf{W}||_F$ where $\odot$ is the Hadamard product, $||\cdot ||_0$ denotes the entrywise 0-norm, and $p$ is a constraint of the number of weights to remove as a percentage of the total number of weights for that layer. In the zero-shot setting, we need to account for out-of-distribution Frobenius distortions, such as alignment distortion in XLM due to pruning and overfitting to a single language. Taking the view of Frobenius distortion minimization when using our weight regularizer, we reformulate it to include Frobenius-MBP as,
107
+
108
+ $$
109
+ \min _ {\mathbf {M}: | | \mathbf {M} | | _ {0} = p} \left[ | | \mathbf {W} - \mathbf {M} \odot \mathbf {W} | | _ {F} ^ {2} + \lambda | | \mathbf {W} ^ {T} - \mathbf {M} \odot \mathbf {W} | | _ {F} ^ {2} \right] \tag {6}
110
+ $$
111
+
112
+ where $\mathbf{W}^T$ are the weights from the pretrained model prior to fine-tuning that is cross-lingually aligned from the masked language modeling (MLM) pretraining objective. In our experiments, $\lambda = 5\times 10^{-4}$ .
113
+
114
+ frobenius-MBP Implicitly Aligns Eigenvectors To explicitly show that the Frobenius distortion minimization aligns fine-pruned and pretrained parameter vectors we expect their eigenvectors to also be close. We can use the Eckart-Young-Mirsky Theorem (Golub et al., 1987) to express Frobenius distortion minimization as Equation 7,
115
+
116
+ $$
117
+ \left\| \mathbf {W} ^ {T} - \mathbf {M} \odot \mathbf {W} \right\| _ {F} ^ {2} = \left\| \boldsymbol {\Sigma} - \mathbf {U} ^ {\top} \mathbf {M} \odot \mathbf {W} \mathbf {V} \right\| _ {F} ^ {2} \tag {7}
118
+ $$
119
+
120
+ where, by the unitary invariance of the Frobenius norm, $\mathbf{U},\mathbf{V}$ vanish and the singular value matrix $\boldsymbol{\Sigma}$ is left to approximate $\mathbf{W}^T$. We express $\mathbf{X} = \mathbf{U}_k\Sigma_k^{1/2}$, $\mathbf{Y} = \Sigma_k^{1/2}\mathbf{V}_k^\top$ and $\mathbf{XY} = \mathbf{A}_k$. Hence, we can further describe the minimization as $||\Sigma -\mathbf{U}^{\top}\mathbf{W}_{k}^{T}\mathbf{V}||_{F}^{2}$ and, since $\mathbf{X},\mathbf{Y}$ are unitary, as $||\Sigma -\Sigma_k||_F^2$.
121
+
122
+ # 3.2 Connections to Knowledge Distillation
123
+
124
+ Knowledge distillation (KD) works by using outputs of the last layer (Hinton et al., 2015) or intermediate layers (Romero et al., 2015) as additional soft targets. AlignReg regularizers instead
125
+
126
+ ![](images/81f04101c1f502a7be1985bf0816d992e6b23ff4ce7f3431267a15268d0f8e8d.jpg)
127
+ Figure 1: English and Zero-Shot Test Accuracy on News Classification.
128
+
129
+ operate directly on minimizing a divergence or distance between weight tensors as opposed to their corresponding output activations. Hence, AlignReg does not necessarily need training data as it operates directly on aligning weight tensors. Since the networks that are used for alignment are architecturally identical, we can show that maximizing weight similarity is equivalent to minimizing distance between their corresponding output activations (Romero et al., 2015) when the norm of input $Z$ is smaller than the output range of $\sigma$. For our experiments, we use XLM-RoBERTaBase which contains Gaussian Error Linear Unit (GELU) activation functions, which can be formulated as $\sigma(\mathbf{Z}_{li}) := \frac{\mathbf{Z}_{li}}{2}\left(1.0 + \mathrm{erf}(\mathbf{Z}_{li} / \sqrt{2.0})\right)$ where erf is an error function, $\sigma(\cdot)$ is a monotonic activation function and $\mathbf{Z}_{li}$ is the input vector. The GELU activation has the properties that for $\mathbf{Z}_{li} > 0$ it is equivalent to the ReLU activation and for $\mathbf{Z}_{li} \leq 0$ it tends to -1. For $\mathbf{Z}_{li} > 0$, $||\mathbf{Z}_{li}||_2 \leq 1$ and a monotonic piecewise linear function $\sigma(\cdot)$, the inequality in Equation 8 holds.
130
+
131
+ $$
132
+ \left| \left| \mathbf {W} _ {l i} - \mathbf {M} _ {l i} \odot \mathbf {W} _ {l i} \right| \right| _ {F} \leq \tag {8}
133
+ $$
134
+
135
+ $$
136
+ \left| \left| \sigma \left(\mathbf {Z} _ {l} \mathbf {W} _ {l i}\right) - \sigma \left(\mathbf {Z} _ {l i} \mathbf {M} _ {l i} \odot \mathbf {W} _ {l i}\right) \right| \right| _ {F}
137
+ $$
138
+
139
+ Layer normalization leads to features having zero mean and unit variance and hence $||\mathbf{Z}_{li}||_2 \leq 1$. Thus, minimizing the Frobenius distortion of pruned and unpruned weights is equivalent to minimizing the mean squared error (MSE) between output activations, as is the knowledge distillation method used for FitNets (Romero et al., 2015). In contrast, KD using FitNets encourages the student network to have activation outputs that are the same as the teacher with permutation invariance on the units incoming weights, not restricting the weights to be similar. Unlike KD, this minimization can be performed without any data.
140
+
141
+ # 4 Experimental Setup
142
+
143
+ Datasets. We perform experiments on multilingual tasks from the XGLUE benchmark (Liang et al., 2020) with pretrained XLM-R<sub>Base</sub>. This covers pairwise classification (QAM, QADSM, WPR, XNLI), sentence classification (NC) and structured prediction (NER and POS) tasks.
144
+
145
+ Iterative Pruning Details. Texts are tokenized using the SentencePiece BPE tokenizer (Sennrich et al., 2016) with a vocabulary of 250K tokens. For structured prediction tasks (POS and NER), a single layer feed-forward (SLFF) token-level classifier is used on top of XLM- $\mathbf{R}_{\mathrm{Base}}$ and for sentence-level task a SLFF sentence-level classifier is used. The batch size is 32, the learning rate is $5 \cdot 10^{-6}$ and the maximum sequence length is set to 256 for all tasks, except for POS in which we use a learning rate of $2 \cdot 10^{-5}$ with the adam optimizer (Kingma and Ba, 2015) with weight decay (AdamW) and a max sequence length of 128. We carry out a pruning step after each 15 training epochs, uniformly pruning $10\%$ of the parameters at each pruning step. We omit the pruning of embedding layers, layer normalization parameters and the classification layer as they account for a relatively small number of the total parameter count ( $< 1\%$ ) and play an important role in XLM generalization. Although prior work has suggested non-uniform pruning schedules (e.g., cubic schedule (Zhu and Gupta, 2017)), we did not see major differences to uniform pruning in preliminary experiments. Each task is trained with English data only and evaluated on all available languages for that task. Hence, we expect the percentage of achievable compression to be lower as performance in the zero-shot cross-lingual setting to be more difficult than the monolingual setting (e.g., GLUE tasks).
146
+
147
+ ![](images/543c8329d04947751cf1ce51a599e6ad5cdc5a9497d6ef76d59b1d7750a9e491.jpg)
148
+ Figure 2: Zero-Shot Test F1 on Named Entity Recognition.
149
+
150
+ ![](images/df01ef8b8610debf2d6a3d4f4e421a03e1e8c3767454f85187d1b1c6babba2da.jpg)
151
+ Figure 3: Question Answer Matching Test Accuracy.
152
+
153
+ Pruning Baselines. Below lists our pruning baselines. Random Pruning (1997) - weights are pruned uniformly at random across all layers to a chosen fraction. Layer-wise Magnitude Pruning (MBP) (Janowsky, 1989; Mozer and Smolensky, 1989) - for each layer, weights with the lowest absolute value (LAV) are pruned. Layer-wise Gradient Magnitude Pruning (Sun et al., 2017) - for each layer, prunes the weights with LAV of the accumulated gradients evaluated on a batch of inputs. Global Magnitude Pruning (Global-MBP) (Karnin, 1990) - prunes weights with LAV anywhere in the DNN. $L_{0}$ norm MBP (Louizos et al., 2018) - uses non-negative stochastic gates that choose which weights are set to zero as a smooth approximation to the non-differentiable $L_{0}$ -norm. Lookahead pruning (LAP) (Park et al., 2019) - prunes paths that have smallest weight magnitude across blocks of layers, unlike MBP which treats layers independently. Layer-Adaptive Magnitude Pruning (LAMP) (Lee et al., 2020) adaptively sets the pruning ratio of each layer.
154
+
155
+ # 5 Empirical Results
156
+
157
+ We now discuss results on the XGLUE tasks.
158
+
159
+ News Classification (NC) Figure 1 shows the results on news classification where a category for news article is predicted and evaluated in 5 languages and trained and iteratively pruned on English text. Firstly, we observe the trend in iterative pruning performance degradation is somewhat volatile. From preliminary experiments we found news classification to require only 3 epochs to converge for standard fine-tuning on XLM-RoBERTa<sub>Base</sub>. We find that this task is relatively "similar" to the pretraining task and therefore able to easier recover from pruning steps. Overall, both Cosine-MBP and Frobenius-MBP consistently lead to the best zero-shot test performance across both pruning steps and languages.
160
+
161
+ Question Answer Matching (QAM) Figure 3 shows the test accuracy on English and the zero-shot test accuracy on French and German for Question-Answer Matching (QAM). This involves predicting whether a question is answered correctly or not given a question-answer pair. We find that Frobenius-MBP and Cosine-MBP maintain higher accuracy across multiple pruning steps, outperforming baselines. More generally, we see there is close to a $2\%$ drop in average test accuracy in French and German when compared to testing on samples from the same language used in training.
162
+
163
+ Named Entity Recognition (NER) The Named Entity Recognition (NER) cross-lingual dataset is made up of CoNLL-2002 NER and CoNLL-2003 NER (Sang and De Meulder, 2003), covering English, Dutch, German and Spanish with 4 named entities. From Figure 2 we find that cross-lingual transfer of pruned models is most difficult in German and Dutch, which both come from the same language family, sharing commonalities such as word order and having similar vocabularies. The primary reason for the difficulty in maintaining per
164
+
165
+ ![](images/c61f1d3af4c56968d3addcb8d75471d2f375eaefe19b76d644b2c9ecfdba300d.jpg)
166
+ Figure 4: Part of Speech Tagging Test Accuracy.
167
+
168
+ ![](images/1cc075e13e99924ec9ba46a66ffee2d7960331d1df26caa15a53edaebc5f7e46.jpg)
169
+ Figure 5: Web-Page Ranking Test Matthew's Correlation Coefficient.
170
+
171
+ formance in high compression rates for this NER dataset is that there is only 15k training samples, being significantly lower than the remaining XGLUE tasks (the majority contains 100k training samples). Thus, not only is there less training data to recover directly after each pruning step, but the pruning step interval itself is shorter. In contrast, English test performance is close to the original performance up until $25\%$ of remaining weights, unlike the remaining languages. We find that gradient-MBP eventually overtakes MBP approaches past $20\%$ remaining weights. However accuracy has reduced too much at this compression level. We find that Cosine-MBP and Frobenius-MBP weight regu
172
+
173
+ larizers achieve the best performing pruned model performance above $20\%$ remaining weights, with Lookahead pruning and $L_{0}$ regularized MBP being competitive in zero-shot performance.
174
+
175
+ Part of Speech Tagging (POS) The Part of Speech (PoS) tagging dataset consists of a subset of the Universal Dependencies treebank (Nivre et al., 2020) and covers 18 languages. In Figure 4, we see both Cosine-MBP and Frobenius-MBP tend to outperform baselines, although $L_{0}$ -based pruning (Louizos et al., 2018) has similar performance to Cosine-MBP for zero-shot accuracy. There is also a clear discrepancy between SSL accuracy (English) versus zero-shot accuracy (Average), the latter following closer to linear decay after $40 - 50\%$ of weights remaining. Generally, both Cosine-MBP and Frobenius-MBP outperform baselines with the exception of Thai and Urdu at higher compression rates ( $< 40\%$ ), both being some of the most under-resourced languages of all 18 languages.
176
+
177
+ Web Page Ranking aims to predict whether a web page is relevant (1-5 ratings, "bad" to "perfect") to an input query and it is evaluated for 7 languages using the Normalized Discounted Cumulative Gain (nDCG). From Figure 5, we see that between the $15\% - 45\%$ region the average zero-shot performance degrades faster than the English language used for training. In contrast, semantically and syntactically different languages from English, such as Chinese, already suffer from loss of alignment due to pruning as the performance gap between proposed methods (and baselines) and random pruning is shortened.
178
+
179
+ ![](images/8e03fbb891ec931b1e138fb13c919bf4084e45df6773306483e873a6f2ab7a12.jpg)
180
+ Figure 6: Zero-Shot XNLI Results Per Language After Iteratively Fine-Pruning XLM-RoBERTa<sub>Base</sub>
181
+
182
+ Cross-Lingual Natural Language Inference (XNLI) Figure 6 shows the zero-shot cross-lingual transfer for various unstructured pruning methods. We find that both the accuracy on the English test (i.e., SSL generalization) and the average zero-shot test accuracy are consistently improved using Cosine-MBP and Frobenius-MBP, outperforming $L_{0}$ pruning, Lookahead pruning and LAMP. We find that morphologically rich languages such as Arabic, Swahili and Turkish degrade in performance linearly once performance begins to drop after $60\%$ of the remaining weights are pruned. This trend is roughly followed for all MBP-based pruning methods. Additionally, test accuracy on English can be maintained within a $10\%$ accuracy drop of the original test accuracy up to $20\%$ of remaining weights for MBP, while Swahili can only be within a $10\%$ accuracy drop up to $55\%$ of the remaining weights. Hence, iterative pruning in the zero-shot setting leads to faster performance degradation for languages that are typologically or etymologically further from the language used for fine-tuning.
183
+
184
+ When comparing English and the average zero-shot test accuracy, we see that the slope is steeper after the inflection point² for all pruning methods, not to mention the greater than $10\%$ accuracy drop across pruning steps.
185
+
186
+ XGLUE Average Result Finally, in Table 1 we show the overall and average task understanding
187
+
188
+ scores on the XGLUE benchmark for our proposed AlignReg weight regularizer and the pruning baselines. We find that the use of AlignReg Cosine-MBP and Frobenius-MBP better preserves cross-lingual alignment during model pruning, thereby outperforming other MBP baselines, including LAMP and Lookahead pruning, based on improved zero-shot cross-lingual performance.
189
+
190
+ ![](images/e07850e69308d688a7445b9e183c81b7591a19592dc83f19f862087da2cd213b.jpg)
191
+ Figure 7: Pruned Model Weight Norms Per Layer
192
+
193
+ Discussion From our experiments, we found that layer-wise pruning tends to outperform global pruning. This can be explained by the clear discrepancy between weight norms of different layer types within each self-attention block. Global pruning chooses the majority of weights to prune from the layer type that has the smallest norm, leading to an information bottleneck, or layer collapse (Lee et al., 2018) for very high compression rates. This effect
194
+
195
+ <table><tr><td>Prune Method</td><td>XNLI</td><td>NC</td><td>NER</td><td>PAWSX</td><td>POS</td><td>QAM</td><td>QADSM</td><td>WPR</td><td>Avg.</td></tr><tr><td>No Pruning</td><td>73.48</td><td>80.10</td><td>82.60</td><td>89.24</td><td>80.34</td><td>68.56</td><td>68.06</td><td>73.32</td><td>76.96</td></tr><tr><td>Random</td><td>51.22</td><td>70.19</td><td>38.19</td><td>57.37</td><td>52.57</td><td>53.85</td><td>52.34</td><td>70.69</td><td>55.80</td></tr><tr><td>Global-Random</td><td>50.97</td><td>69.88</td><td>38.30</td><td>56.74</td><td>53.02</td><td>54.02</td><td>53.49</td><td>69.11</td><td>55.69</td></tr><tr><td>L0-MBP</td><td>64.75</td><td>78.98</td><td>56.22</td><td>72.09</td><td>71.38</td><td>59.31</td><td>53.35</td><td>71.70</td><td>65.97</td></tr><tr><td>L2-MBP</td><td>64.30</td><td>78.79</td><td>54.43</td><td>77.99</td><td>70.68</td><td>59.24</td><td>60.33</td><td>71.52</td><td>67.16</td></tr><tr><td>L2-Global-MBP</td><td>64.17</td><td>78.64</td><td>54.47</td><td>75.51</td><td>72.27</td><td>59.26</td><td>60.10</td><td>71.50</td><td>66.99</td></tr><tr><td>L2-Gradient-MBP</td><td>61.11</td><td>73.77</td><td>53.25</td><td>79.56</td><td>65.89</td><td>57.35</td><td>59.33</td><td>71.59</td><td>65.23</td></tr><tr><td>Lookahead</td><td>60.84</td><td>79.18</td><td>54.44</td><td>71.05</td><td>68.76</td><td>55.94</td><td>53.41</td><td>71.26</td><td>64.36</td></tr><tr><td>LAMP</td><td>58.04</td><td>63.64</td><td>51.92</td><td>66.05</td><td>67.43</td><td>55.36</td><td>52.42</td><td>71.09</td><td>60.74</td></tr><tr><td>Cosine-MBP</td><td>66.20</td><td>79.15</td><td>55.62</td><td>78.45</td><td>71.62</td><td>57.56</td><td>61.37</td><td>72.51</td><td>67.81</td></tr><tr><td>Frobenius-MBP</td><td>65.71</td><td>79.84</td><td>55.61</td><td>78.78</td><td>71.62</td><td>61.62</td><td>61.37</td><td>71.48</td><td>68.25†</td></tr></table>
196
+
197
+ Table 1: Overall XGLUE Score for Iterative Pruning of XLM-RBase @ 31% Remaining Weights.
198
+
199
+ is due to layer normalization being applied after query, key and value (QKV) parameters, rescaling features such that weight magnitudes remain low. Hence, this motivates why we have focused on the application of AlignReg to layer-wise MBP. This is reflected in Figure 7, which shows the weight norm by layer type for each layer for MBP. We see that QKV weight values are distinctly higher than the remaining fully-connected layers (attention output layer, intermediate position-wise feedforward layer and the block's output layer), with the exception that the output attention layer norm becomes higher between layers 3-8.
200
+
201
+ ![](images/b275fef2c86021bca7ec8fb692a58b27bfb0af54cf2b10e9e41b1afa033eb2a7.jpg)
202
+ Figure 8: Class Separability Between Class Representations At Each Iterative Pruning Step on PAWSX.
203
+
204
+ For the majority of tasks, the rate of performance drop for zero-shot test performance occurs close to $30\%$ of remaining weights. This is consistent for all pruning methods and therefore the focus of our analysis has been around this operating region.
205
+
206
+ We also note that the effect of MBP (including our AlignReg regularization-based MBP) on zero-shot performance for different languages heavily depends on the semantic distance of evaluated lan
207
+
208
+ guage to the single language used for training. For example, in Figure 6 Arabic, Bulgarian, Swahili and Hindi have the largest drops in test accuracy around $20 - 60\%$ remaining weights. Similarly Arabic, Thai and Hindi suffer most around $20\% - 60\%$ for PoS tagging in Figure 4. However, we also acknowledge this is partly reliant on the proportion of training data per language used during pretraining the underlying language model, in our case XLM- $\mathbf{R}_{\mathrm{Base}}$ .
209
+
210
+ Lastly, to show the representational degradation of pruned networks, in Figure 8 we visualize the class separability via a t-SNE plot of two principal components of the last hidden representation corresponding to the [CLS] token of an iteratively pruned XLM- $\mathbf{R}_{\mathrm{Base}}$ for PAWSX. Even from only two principal components of a single token input, we clearly see a change in class separability from $31\%$ to $28\%$ remaining weights, reflecting the lack of linear separation.
211
+
212
+ # 6 Conclusion
213
+
214
+ In this paper, we analysed iterative pruning in the zero-shot setting where a pretrained masked language model uses self-supervised learning on text from various languages but can only use a single language for downstream task fine-tuning. We find that some languages degrade in iterative pruning performance faster than others for some tasks (NER and XNLI) and propose a weight regularizer that biases the iteratively pruned model towards learning weight distributions close to the cross-lingually aligned pretrained state. This improves over well-established weight regularization methods for magnitude-based pruning in both the standard supervised learning setting and the zero-shot setting.
215
+
216
+ # References
217
+
218
+ Johanni Brea, Berlin Simsek, Bernd Illing, and Wulfram Gerstner. 2019. Weight-space symmetry in deep networks gives rise to permutation saddles, connected by equal-loss valleys across the loss landscape. arXiv preprint arXiv:1907.02911.
219
+ Maxime Bucher, Stéphane Herbin, and Frédéric Jurie. 2017. Generating visual representations for zero-shot classification. In Proceedings of the IEEE International Conference on Computer Vision Workshops, pages 2666-2673.
220
+ Cristian Bucilua, Rich Caruana, and Alexandru Niculescu-Mizil. 2006. Model compression. In Proceedings of the 12th ACM SIGKDD international conference on Knowledge discovery and data mining, pages 535-541.
221
+ Sanyuan Chen, Yutai Hou, Yiming Cui, Wanxiang Che, Ting Liu, and Xiangzhan Yu. 2020. Recall and learn: Fine-tuning deep pretrained language models with less forgetting. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 7870-7881, Online. Association for Computational Linguistics.
222
+ Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Édouard Grave, Myle Ott, Luke Zettlemoyer, and Veselin Stoyanov. 2020a. Unsupervised cross-lingual representation learning at scale. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 8440-8451.
223
+ Alexis Conneau and Guillaume Lample. 2019. Crosslingual language model pretraining. In Advances in neural information processing systems, volume 32.
224
+ Alexis Conneau, Shijie Wu, Haoran Li, Luke Zettlemoyer, and Veselin Stoyanov. 2020b. Emerging cross-lingual structure in pretrained language models. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 6022-6034, Online. Association for Computational Linguistics.
225
+ Xiaohan Ding, Guiguang Ding, Jungong Han, and Sheng Tang. 2018. Auto-balanced filter pruning for efficient convolutional neural networks. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32.
226
+ Xin Dong, Shangyu Chen, and Sinno Pan. 2017. Learning to prune deep neural networks via layer-wise optimal brain surgeon. Advances in Neural Information Processing Systems, 30.
227
+ G.H. Golub, Alan Hoffman, and G.W. Stewart. 1987. A generalization of the eckart-young-mirsky matrix approximation theorem. Linear Algebra and its Applications, 88-89:317-327.
228
+
229
+ Mitchell A Gordon, Kevin Duh, and Nicholas Andrews. 2020. Compressing bert: Studying the effects of weight pruning on transfer learning. arXiv preprint arXiv:2002.08307.
230
+ S Han, H Mao, and WJ Dally. 2016. Deep compression: Compressing deep neural networks with pruning, trained quantization and huffman coding. In International Conference on Learning Representations.
231
+ Song Han, Jeff Pool, John Tran, and William Dally. 2015. Learning both weights and connections for efficient neural network. Advances in neural information processing systems, 28.
232
+ Babak Hassibi and David G Stork. 1993. Second order derivatives for network pruning: Optimal brain surgeon. Morgan Kaufmann.
233
+ Geoffrey Hinton, Oriol Vinyals, and Jeffrey Dean. 2015. Distilling the knowledge in a neural network. In NIPS Deep Learning and Representation Learning Workshop.
234
+ Jeremy Howard and Sebastian Ruder. 2018. Universal language model fine-tuning for text classification. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 328-339, Melbourne, Australia. Association for Computational Linguistics.
235
+ Steven A Janowsky. 1989. Pruning versus clipping in neural networks. Physical Review A, 39(12):6600.
236
+ Ehud D Karnin. 1990. A simple procedure for pruning back-propagation trained neural networks. IEEE transactions on neural networks, 1(2):239-242.
237
+ Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings.
238
+ Vadim Lebedev and Victor Lempitsky. 2016. Fast convnets using group-wise brain damage. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2554-2564.
239
+ Yann LeCun, John S Denker, and Sara A Solla. 1990. Optimal brain damage. In Advances in neural information processing systems, pages 598-605.
240
+ Jaeho Lee, Sejun Park, Sangwoo Mo, Sungsoo Ahn, and Jinwoo Shin. 2020. Layer-adaptive sparsity for the magnitude-based pruning. In International Conference on Learning Representations.
241
+ Namhoon Lee, Thalaiyasingam Ajanthan, and Philip HS Torr. 2018. Snip: Single-shot network pruning based on connection sensitivity. In International Conference on Learning Representations.
242
+
243
+ Yaobo Liang, Nan Duan, Yeyun Gong, Ning Wu, Fenfei Guo, Weizhen Qi, Ming Gong, Linjun Shou, Daxin Jiang, Guihong Cao, et al. 2020. Xglue: A new benchmark dataset for cross-lingual pre-training, understanding and generation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 6008-6018.
244
+ Zhuang Liu, Jianguo Li, Zhiqiang Shen, Gao Huang, Shoumeng Yan, and Changshui Zhang. 2017. Learning efficient convolutional networks through network slimming. In Proceedings of the IEEE International Conference on Computer Vision, pages 2736-2744.
245
+ Christos Louizos, Max Welling, and Diederik P Kingma. 2018. Learning sparse neural networks through $L_{0}$ regularization. In International Conference on Learning Representations.
246
+ Pavlo Molchanov, Stephen Tyree, Tero Karras, Timo Aila, and Jan Kautz. 2016. Pruning convolutional neural networks for resource efficient inference. arXiv preprint arXiv:1611.06440.
247
+ Michael C Mozer and Paul Smolensky. 1989. Skeletonization: A technique for trimming the fat from a network via relevance assessment. In Advances in neural information processing systems, pages 107-115.
248
+ Joakim Nivre, Marie-Catherine de Marneffe, Filip Ginter, Jan Hajic, Christopher D. Manning, Sampo Pyysalo, Sebastian Schuster, Francis Tyers, and Daniel Zeman. 2020. Universal Dependencies v2: An evergrowing multilingual treebank collection. In Proceedings of the 12th Language Resources and Evaluation Conference, pages 4034-4043, Marseille, France. European Language Resources Association.
249
+ Sejun Park, Jaeho Lee, Sangwoo Mo, and Jinwoo Shin. 2019. Lookahead: A far-sighted alternative of magnitude-based pruning. In International Conference on Learning Representations.
250
+ Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. 2021. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, pages 8748-8763. PMLR.
251
+ Russell Reed. 1993. Pruning algorithms-a survey. IEEE transactions on Neural Networks, 4(5):740-747.
252
+ Adriana Romero, Samira Ebrahimi Kahou, Polytechnique Montréal, Y. Bengio, Université De Montréal, Adriana Romero, Nicolas Ballas, Samira Ebrahimi Kahou, Antoine Chassang, Carlo Gatta, and Yoshua Bengio. 2015. Fitnets: Hints for thin deep nets. In in International Conference on Learning Representations (ICLR).
253
+
254
+ Erik Tjong Kim Sang and Fien De Meulder. 2003. Introduction to the conll-2003 shared task: Language-independent named entity recognition. In Proceedings of the Seventh Conference on Natural Language Learning at HLT-NAACL 2003, pages 142-147.
255
+ Victor Sanh, Thomas Wolf, and Alexander M Rush. 2020. Movement pruning: Adaptive sparsity by finetuning. arXiv preprint arXiv:2005.07683.
256
+ Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural machine translation of rare words with subword units. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1715-1725, Berlin, Germany. Association for Computational Linguistics.
257
+ Sidak Pal Singh and Dan Alistarh. 2020. Woodfisher: Efficient second-order approximation for neural network compression. Advances in Neural Information Processing Systems, 33:18098-18109.
258
+ Slawomir W Stepniewski and Andy J Keane. 1997. Pruning backpropagation neural networks using modern stochastic optimisation techniques. Neural Computing & Applications, 5(2):76-98.
259
+ Xu Sun, Xuancheng Ren, Shuming Ma, and Houfeng Wang. 2017. meprop: Sparsified back propagation for accelerated deep learning with reduced overfitting. In International Conference on Machine Learning, pages 3299-3308. PMLR.
260
+ Chaoqi Wang, Roger Grosse, Sanja Fidler, and Guodong Zhang. 2019. *Eigendamage: Structured pruning in the kronecker-factored eigenbasis*. In International Conference on Machine Learning, pages 6566–6575. PMLR.
261
+ Huan Wang, Can Qin, Yulun Zhang, and Yun Fu. 2020. Neural pruning via growing regularization. In International Conference on Learning Representations.
262
+ Wei Wen, Chunpeng Wu, Yandan Wang, Yiran Chen, and Hai Li. 2016. Learning structured sparsity in deep neural networks. Advances in neural information processing systems, 29.
263
+ Jianbo Ye, Xin Lu, Zhe Lin, and James Z Wang. 2018. Rethinking the smaller-norm-less-informative assumption in channel pruning of convolution layers. In International Conference on Learning Representations.
264
+ Ming Yuan and Yi Lin. 2006. Model selection and estimation in regression with grouped variables. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 68(1):49-67.
265
+ Michael Zhu and Suyog Gupta. 2017. To prune, or not to prune: exploring the efficacy of pruning for model compression. arXiv preprint arXiv:1710.01878.
alignedweightregularizersforpruningpretrainedneuralnetworks/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fe3bcc0f1d6871ca452fa8356a4e8e06c1d2b8f578c3a7735c77161e782ba70f
3
+ size 673042
alignedweightregularizersforpruningpretrainedneuralnetworks/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f5d5dfe3d4a3b38bfe263cc847c9ef14bbad4c5004c9e662e85364559cb9f09d
3
+ size 379507
amrdadataaugmentationbyabstractmeaningrepresentation/7e6fbf01-bac7-4955-b27c-0dad5aedf719_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0eff6a4650283bec3488b1da6b083409613b681795fef24cbb9ccadf2e988163
3
+ size 104132
amrdadataaugmentationbyabstractmeaningrepresentation/7e6fbf01-bac7-4955-b27c-0dad5aedf719_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fca2cbb205062a6349c292eaa5c812aa09e4ab8c39656706e9f4b89301cf1da7
3
+ size 124846
amrdadataaugmentationbyabstractmeaningrepresentation/7e6fbf01-bac7-4955-b27c-0dad5aedf719_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fb92e1dea145064711f12de8511d3cea6cb61c03329956b3854b5b16519a053c
3
+ size 976701
amrdadataaugmentationbyabstractmeaningrepresentation/full.md ADDED
@@ -0,0 +1,357 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # AMR-DA: Data Augmentation by Abstract Meaning Representation
2
+
3
+ Ziyi SHOU $^{1}$ Yuxin JIANG $^{2}$ Fangzhen LIN $^{1}$
4
+
5
+ $^{1}$ Department of Computer Science and Engineering
6
+
7
+ $^{2}$ DSA Thrust, Information Hub
8
+
9
+ The Hong Kong University of Science and Technology
10
+
11
+ {zshou,flin}@cse.ust.hk,yjiangcm@connect.ust.hk
12
+
13
+ # Abstract
14
+
15
+ Abstract Meaning Representation (AMR) is a semantic representation for NLP/NLU. In this paper, we propose to use it for data augmentation in NLP. Our proposed data augmentation technique, called AMR-DA, converts a sample sentence to an AMR graph, modifies the graph according to various data augmentation policies, and then generates augmentations from graphs. Our method combines both sentence-level techniques like back translation and token-level techniques like EDA (Easy Data Augmentation). To evaluate the effectiveness of our method, we apply it to the English tasks of semantic textual similarity (STS) and text classification. For STS, our experiments show that AMR-DA boosts the performance of the state-of-the-art models on several STS benchmarks. For text classification, AMR-DA outperforms EDA and AEDA and leads to more robust improvements.
16
+
17
+ # 1 Introduction
18
+
19
+ Data augmentation (DA) techniques automatically generate additional data from existing data set for training machine learning models. They are widely used in computer vision (see, e.g. Perez and Wang, 2017) and can boost the performance of the trained models.
20
+
21
+ In NLP, DA methods can be roughly classified into token-level ones and sentence-level ones (Chen et al., 2021). Token-level DA methods generate new sample sentences from the original ones by changing some of their tokens (words). They include the method in Zhang et al. (2015) that replaces some random tokens by their synonyms using a thesaurus, the now widely used Easy Data Augmentation (EDA) methods in Wei and Zou (2019) that allow some random token insertion, deletion and swaps, and the more recent one in
22
+
23
+ Liu et al. (2020) that performs token replacement using their embeddings. One advantage of these token-level DA methods is that they are easy to implement. However, they can sometimes generate ill-formed or incoherent sentences as they do not take the sentence structures into account. In contrast, sentence-level methods generate new sample sentences by modifying the whole original sentences. They typically work by having an encoder that converts the input sentence to an intermediate representation and a decoder that generates new sentences from the intermediate representations. For example, in back translation (Sennrich et al., 2016), the intermediate representation is a sentence in another natural language. In generation methods (Kumar et al., 2020; Yang et al., 2020), the intermediate representation is a hidden state. One advantage of sentence-level DA methods is that they can preserve the semantics of the sentences. A major limitation of current sentence-level DA methods is that there is not much variation in the generated sentences as the intermediate representations used are not easily controllable (Li et al., 2021). For example, modifying the sentences in back translation requires knowledge of other languages, and minor changes of hidden states severely increase training difficulty.
24
+
25
+ In this paper, we propose a new DA method called AMR-DA that uses the Abstract Meaning Representation (AMR, Banarescu et al., 2013) as the intermediate language. AMR is a well-known semantic meaning representation. It aims to remove syntactic idiosyncrasies and to represent the semantic structure of a sentence as a rooted, directed graph. It works well as an intermediate language for data augmentation as it allows us to combine the token-level and sentence-level methods in a single framework. Like the sentence-level method, our method encodes the entire sentence as an AMR graph. Like the token-level methods, our method manipulates AMR graphs at the node
26
+
27
+ ![](images/05dcb294cf9635398ca901b489fbf82d02223c44244a88e41a5279af1108cd29.jpg)
28
+ (1) Text to AMR
29
+ (2) Graph Modification
30
+ (3) AMR to Text
31
+
32
+ (token) level. Thus our method can augment the original sample sentence in various ways without the need to retrain the decoder. This overcomes a key weakness of the current sentence-level methods. Figure 1 shows an overview of our AMR-DA: AMR parser first transduces the sentence into an AMR graph, followed by an AMR graph extension to diversify graphs with different augmentation strategies; finally, the AMR generator synthesizes augmentations from AMR graphs.
33
+
34
+ To demonstrate the effectiveness of our method, we evaluated AMR-DA on two downstream tasks, semantic textual similarity (STS) and text classification tasks. Experimental results show that our methods boosted unsupervised contrastive learning models to achieve new state-of-the-art results on several benchmarks in STS tasks and outperformed EDA and AEDA in text classification tasks.
35
+
36
+ # 2 AMR-DA
37
+
38
+ # 2.1 Background
39
+
40
+ Abstract Meaning Representations (AMRs, Banarescu et al., 2013) are designed to abstract away from syntactic idiosyncrasies by encoding the concepts of the sentences into nodes and the relations between concepts into directed edges. They are represented as rooted, labeled graphs textually in PENMAN notation (Goodman, 2020) or graphically. Sentences with identical basic meanings are assigned to the same AMR graph. Figure 2 shows that three sentences with varied surface syntax share the
41
+
42
+ ![](images/115ba2eda358a073f671ffc5862f030a6940452da16c4aac91abb4286bbd0ffb.jpg)
43
+ Figure 1: Overview of AMR-DA pipeline: (1) Text to AMR: the AMR parser captures the meaning of the input sentence and transduces it to an AMR graph. (2) Graph Modification: the fundamental choice is not to modify the AMR graph to preserve the entire semantics. Inspired by EDA (Wei and Zou, 2019), we apply four strategies to diversify the graph. RS: random swap; RD: random deletion; RI: random insertion; SR: synonym replacement. (3) AMR to Text: the AMR generator synthesizes sentences from AMR graphs.
44
+ The woman described the mission as a disaster.
45
+ The woman's description of the mission: disaster.
46
+ As the woman described it, the mission was a disaster.
47
+ Figure 2: Three sentences with varied surface syntax share the same AMR. Textual and graphical representations are equal.
48
+
49
+ same AMR. In AMR, variables are introduced for entities, events, properties, and states. For example, "d", "m" in the figure are variables. "d/describe-01" refers to an instance d of the AMR concept "describe-01". "describe" is the frame from Propbank (Kingsbury and Palmer, 2002) and "-01" is the sense of the frame. AMR concepts can also be English words such as "woman". When an entity plays multiple roles in a sentence, we re-use the corresponding variable in graph notation, called reentrancy. The phrases beginning with ":" are relations in AMR graphs. ":ARG0", ":ARG1", ":ARG2" are frame arguments, following PropBank conventions. AMR contains approximately 100 relations; in addition to the edges mentioned in the example, there are general semantic relations (":age", ":location"), relations for quantities (":quant") and relations for date-entities (":month", ":season"), etc.
50
+
51
+ # 2.2 AMR Parsing
52
+
53
+ The AMR parser is the first component of AMR-DA (Figure 1). AMR parsing is the task of understanding the sentence and then transducing it to AMR graphs. Lack of explicit alignments between AMR nodes and tokens brings obstacles to AMR parsing. Previous AMR parsers always include complex and fine-grained pre- and post-processing processes, which makes them brittle to extend and apply to other tasks. With the help of pretrained language models, sequence-to-sequence (seq2seq) methods have attracted continually growing interest. This paper adopts SPRING $^2$ (Bevilacqua et al., 2021), which achieves state-of-the-art performance on AMR parsing, as our AMR parser. SPRING also implemented a generator in their work; however, we adopt an alternative generator with better performance, introduced in Section 2.4.
54
+
55
+ SPRING first linearized AMR graphs to sequences through DFS-based PENMAN annotation. Nevertheless, when using seq2seq models, a lack of a clear distinction between variables and concepts may cause confusion. Considering that AMR variables have no semantics, SPRING proposed to use special tokens $\langle \mathsf{R0} \rangle$ , $\langle \mathsf{R1} \rangle$ , ..., $\langle \mathsf{Rn} \rangle$ to represent variables in the linearization graph and to handle co-referring nodes. They also abandoned the redundant slash token "/". Under this setting, AMR graph in Figure 2 became: $(\langle \mathsf{R0} \rangle \text{describe-01}: \mathsf{ARG0}(\langle \mathsf{R1} \rangle \text{woman}): \mathsf{ARG1}(\langle \mathsf{R2} \rangle \text{mission}): \mathsf{ARG2}(\langle \mathsf{R3} \rangle \text{disaster}))$ . Adjacency information was still preserved in the linearization process.
56
+
57
+ After linearizing AMR graphs, SPRING extended a pretrained model, BART (Lewis et al., 2020) which is a transformer-based encoder-decoder model. In order to make BART vocabularies suitable for AMR, they added relations and frames frequently occurring in the training data and initialized the vectors as the average of words embeddings. The results from the seq2seq model need only slight post-processing to transfer sequences to standard PENMAN notations. Details can be found in SPRING paper (Bevilacqua et al., 2021). AMRDA adopts the model which achieves state-of-the-art performance on AMR 2.0 as AMR parser.
58
+
59
+ # 2.3 AMR Graph Modification
60
+
61
+ Discreteness in languages is the obstacle to transferring data augmentation methods from vision to NLP. Token-level methods attempt to apply modi
62
+
63
+ fications on tokens but ignore the entire structure of sentences. However, modifications in sentence-level methods always increase the difficulty of training. The benefit of AMR-DA is that intermediate AMR graphs can be modified through low-cost operations to obtain diverse augmentations; meanwhile, AMR generator will adjust the entire structure of sentences. We shift operations in EDA to AMR graphs. Following EDA, we introduce $\alpha$ to control the percentage of data that operations in AMR-DA will modify.
64
+
65
+ Keep Original (Ori) The fundamental choice is to preserve the entire intermediate AMR graph. In this way, AMR-DA will generate paraphrased text for the input sentence.
66
+
67
+ Random Swap (RS) Traditionally, the RS operation randomly chooses words and swaps their positions. However, randomly swapping concepts may impact the performance of the AMR generator. In Figure 1, if we want to swap positions of "I" and "so" in the original AMR graph, the final graph becomes ":domain (so)" and ":degree (I)", which are not expected to appear in a regular AMR graph. Therefore, we swap concepts and their immediately adjacent edges at the same time. More specifically, we swap edge-node pairs ":degree (so)" and ":domain (I)" instead of tokens. There are two types of effect: if the swapped nodes are not siblings, the RS operation changes the graph structure, while swapping sibling nodes changes the linearization sequence instead of the graph structure. For one augmentation, RS repeats $n$ times the operation of randomly selecting two edge-node pairs and swapping their positions, where $n = \max(1, \alpha \times |\text{edge-node pairs}|)$ . |edge-node pairs| means the number of edge-node pairs.
68
+
69
+ Random Deletion (RD) Instead of removing concepts, we randomly delete concepts with their adjacent edges to guarantee that the rest of graph has necessary components. To control the effects on the AMR graph, RD only applies to leaf nodes. Non-leaf nodes with descendants will possibly have a severe impact on original AMR graphs. For one augmentation, RD repeats random leaf deletion $n$ times where $n = \max(1, \alpha \times |\text{edges-node pairs}|)$ .
70
+
71
+ Random Insertion (RI) RI inserts edge-node pairs instead of concepts to preserve the rationality of the AMR graph. We collect edge-node pairs (leaves) from AMR 2.0 training data and filter unsuitable
72
+
73
+ pairs based on their edges. For example, ":polarity -", which converts the polarity of semantics, is discarded in the RI operation. More examples are listed in Appendix A. For one augmentation, RI randomly inserts $n$ pairs where $n = \max(1, \alpha \times |\text{edge-node pairs}|)$ .
74
+
75
+ Synonym Replacement (SR) SR only operates on concepts, since edges are already well-designed in AMR. In the linearized graph, we filter out tokens that begin with ":" as well as parentheses, randomly select from the other tokens, and replace them with one of their synonyms. SR randomly replaces $n$ concepts where $n = \max(1, \alpha \times |\text{concepts}|)$ . We substitute similar words according to the PPDB synonym database (Pavlick et al., 2015). The substitution function is included in $\text{nlpaug}^3$ .
76
+
77
+ # 2.4 AMR Generation
78
+
79
+ AMR generation generates sentences from the AMR graph, which is the inverse task of AMR parsing. Pretrained transformer-based architectures gradually dominate the development trend of generators (Mager et al., 2020; Bevilacqua et al., 2021). Ribeiro et al.4 proposed a generator based on pretrained language models (PLMs-generator) and added extra task-adaptive pretraining. Compared with SPRING, PLMs-generator simplifies PENMAN annotations without adding special tokens as pointers. They examined and compared two PLMs, BART and T5 (Raffel et al., 2019). PLMs-generator continued task-specific pretraining using language model adaptation (LMA) or supervised task adaptation (STA) training with silver data they collected. Details can be found in the paper (Ribeiro et al., 2021). The default AMR generator in our experiments is based on T5-base.
80
+
81
+ # 3 Experiments
82
+
83
+ We conduct experiments on two NLP tasks, semantic textual similarity tasks and text classification tasks, to evaluate our augmentation method.
84
+
85
+ # 3.1 Semantic Textual Similarity Tasks
86
+
87
+ Semantic textual similarity deals with determining how similar two pieces of sentences are. Recently, contrastive learning has become an influential formalism for unsupervised sentence representation, based on the idea of concentrating similar samples
88
+
89
+ and pushing apart dissimilar samples in the vector space (Chen et al., 2020). That is, given a set of paired sentences $\mathcal{D} = \{(x_i,x_i^+)\}_{i = 1}^m$ where $x_{i}$ and $x_{i}^{+}$ are semantically related, we regard $x_{i}^{+}$ as "positive" of $x_{i}$ and other sentences in the same mini-batch as "negatives". Let $\mathbf{h}_i$ and $\mathbf{h}_i^+$ denote the representations of $x_{i}$ and $x_{i}^{+}$ , then the training objective for a mini-batch of size N is:
90
+
91
+ $$
92
\ell_{i} = -\log \frac{\exp\left(\mathrm{sim}(\mathbf{h}_{i}, \mathbf{h}_{i}^{+}) / \tau\right)}{\sum_{j=1}^{N} \exp\left(\mathrm{sim}(\mathbf{h}_{i}, \mathbf{h}_{j}^{+}) / \tau\right)}
93
+ $$
94
+
95
+ where $\tau$ is a temperature hyperparameter and $sim(\mathbf{h}_1,\mathbf{h}_2)$ is the cosine similarity function.
96
+
97
+ Data augmentation, as the central issue in unsupervised contrastive learning, is utilized to construct "positive pairs". SimCSE (Gao et al., 2021) puts one sentence through pretrained model twice with varied standard dropout masks inside transformers as a minimal form of data augmentation. Although it performs quite well, there still exists a large margin between unsupervised and supervised models. Here we propose a hypothesis that an effective data augmentation in this task requires distinct syntax but related semantics. For this reason, we use AMR-DA as data augmentation to construct positive instances.
98
+
99
+ # 3.1.1 Experimental Settings
100
+
101
+ To verify the effectiveness of AMR-DA, we choose recently proposed models unsup-ConSERT (Yan et al., 2021) and unsup-SimCSE (Gao et al., 2021), which are referred as ConSERT and SimCSE for simplification, as our baseline models. We only replace the original data augmentation methods inside the two models with AMR-DA.
102
+
103
+ We evaluate on seven STS datasets including STS 2012-2016 (Agirre et al., 2012, 2013, 2014, 2015, 2016), STS Benchmark (Cer et al., 2017) and SICK-Relatedness (Marelli et al., 2014) and report Spearman's correlation.
104
+
105
+ Following ConSERT, we use a mixture of unlabeled texts from seven STS datasets as training data and average the token embeddings at the last two layers as the sentence embedding. Following SimCSE, we use 1-million sentences randomly sampled from English Wikipedia as training data and adopt the [CLS] representation with an MLP layer on top of it as the sentence embedding. More training details could be found in Appendix B.
106
+
107
+ <table><tr><td>Model</td><td>Avg.</td></tr><tr><td>BERTbase†</td><td>63.84</td></tr><tr><td>+token augmentations (ConSERT)†</td><td>72.74</td></tr><tr><td>+AMR-RS augmentation</td><td>76.11</td></tr><tr><td>+AMR-RD augmentation</td><td>74.34</td></tr><tr><td>+AMR-RI augmentation</td><td>75.31</td></tr><tr><td>+AMR-SR augmentation</td><td>75.68</td></tr><tr><td>+AMR-Ori augmentation</td><td>76.14</td></tr></table>
108
+
109
+ # 3.1.2 Main Results
110
+
111
+ The first question is which operation we should choose for contrastive learning in the STS task. Table 1 shows the comparison of different augmentation strategies. ConSERT considered cutoff and shuffle token augmentations while we replaced their DA with AMR-DA. The results show that all operations in AMR-DA outperform ConSERT with token augmentations. Since we use AMR-DA to construct positive pairs for STS model training, Table 1 presents that AMR-Ori generates augmentations more similar to the original sentences than other operations. To assess the diversity of augmented data, we adopt F1 measured between two bags of words as the lexical overlap score. A higher lexical overlap F1 indicates more overlap between augmented data and original sentences and less diversity. Table 2 provides the summary statistics for various operations of AMR-DA.
112
+
113
+ Table 1: Performance comparison of models with different AMR-DA operations. †: results from Yan et al., 2021.
114
+
115
+ <table><tr><td>AMR Operation</td><td>Ori</td><td>RS</td><td>RD</td><td>RI</td><td>SR</td></tr><tr><td>Overlap F1</td><td>0.554</td><td>0.531</td><td>0.476</td><td>0.510</td><td>0.449</td></tr></table>
116
+
117
+ Table 3 shows the main results, where the highest numbers among models with the same pretrained encoder are highlighted in bold. Only changing the data augmentation module in ConSERT and SimCSE to AMR-DA, the performance could be boosted substantially to the state-of-the-art. AMR-ConSERT obtains absolute improvements of 3.40 and 1.74 on $\mathrm{BERT}_{\text{base}}$ and $\mathrm{BERT}_{\text{large}}$ respectively compared with the original ConSERT that utilizes feature cutoff and shuffle on tokens as DA methods. While AMR-SimCSE outperforms SimCSE significantly on $\mathrm{BERT}_{\text{base}}$ (1.70 ↑), $\mathrm{BERT}_{\text{large}}$ (1.22 ↑), RoBERTa $\text{base}$ (1.86 ↑) and RoBERTa $\text{large}$ (0.80
118
+
119
+ Table 2: Overlap F1 score of AMR-DA operations.
120
+
121
+ <table><tr><td>Model</td><td>Avg.</td></tr><tr><td colspan="2">unsup-ConSERT Setups</td></tr><tr><td>ConSERT-BERTbase†</td><td>72.74</td></tr><tr><td>AMR-ConSERT-BERTbase</td><td>76.14 (+3.40)</td></tr><tr><td>ConSERT-BERTlarge†</td><td>76.45</td></tr><tr><td>AMR-ConSERT-BERTlarge</td><td>78.19 (+1.74)</td></tr><tr><td colspan="2">unsup-SimCSE Setups</td></tr><tr><td>SimCSE-BERTbase‡</td><td>76.25</td></tr><tr><td>+ back translation</td><td>71.71</td></tr><tr><td>ESimCSE-BERTbase§</td><td>78.27</td></tr><tr><td>- momentum contrast</td><td>77.43</td></tr><tr><td>AMR-SimCSE-BERTbase</td><td>77.95 (+1.70)</td></tr><tr><td>SimCSE-BERTlarge‡</td><td>78.41</td></tr><tr><td>ESimCSE-BERTlarge§</td><td>79.31</td></tr><tr><td>AMR-SimCSE-BERTlarge</td><td>79.63 (+1.22)</td></tr><tr><td>SimCSE-RoBERTabase‡</td><td>76.57</td></tr><tr><td>ESimCSE-RoBERTabase§</td><td>77.44</td></tr><tr><td>AMR-SimCSE-RoBERTabase</td><td>78.43 (+1.86)</td></tr><tr><td>SimCSE-RoBERTalarge‡</td><td>78.90</td></tr><tr><td>ESimCSE-RoBERTalarge§</td><td>79.45</td></tr><tr><td>AMR-SimCSE-RoBERTalarge</td><td>79.70 (+0.80)</td></tr></table>
122
+
123
+ Table 3: The average sentence embedding performance on seven STS test sets, in terms of Spearman's correlation. †: results from Yan et al., 2021. ‡: results from Gao et al., 2021; §: results from Wu et al., 2021. Models beginning with "AMR" are the models with AMR-DA.
124
+
125
+ $\uparrow$ ). We also make a comparison between our models and current state-of-the-art model ESimCSE (Wu et al., 2021), which uses word repetition to construct positive pairs and momentum contrast to expand negative pairs. Experimental results indicate that AMR-SimCSE surpasses ESimCSE on $\mathrm{BERT}_{large}(0.33\uparrow)$ , RoBERTa $^{base}$ $(0.99\uparrow)$ and RoBERTa $^{large}(0.25\uparrow)$ . If we discard momentum contrast in ESimCSE and only compare the effectiveness of DA methods, AMR-SimCSE (77.95) outperforms ESimCSE (77.43) on $\mathrm{BERT}_{base}$ .
126
+
127
+ In addition, we implemented SimCSE with back translation based on WMT'19 English-German translation models (Ng et al., 2019) as the DA method. We use random sampling for decoding as recommended by (Edunov et al., 2018a), and set the temperature to 0.8. Other training settings are the same as those of SimCSE. As shown in Table 3, back translation is inferior to AMR-DA in STS tasks. The possible reason is that augmentations with limited diversity are hard to improve
128
+
129
+ pretrained models.
130
+
131
+ # 3.2 Text Classification Tasks
132
+
133
+ Text classification tasks are widely studied in many real applications, such as document categorization, email spam filtering, etc. The performance of machine learning methods in this task always depends on the quality of training data. How to use DA techniques to improve machine learning systems attracts a number of studies (Wang and Yang, 2015; Wei and Zou, 2019; Liu et al., 2020; Karimi et al., 2021). AMR-DA is partly inspired by EDA, which explores text editing techniques for data augmentation. EDA performs SR, RI, RS, or RD operations on tokens, whereas AMR-DA performs these DA strategies on AMR graphs. In order to answer whether DA strategies on AMR graphs perform better than on tokens, we conduct a fair assessment on EDA and AMR-DA. In addition, to show the effectiveness of AMR-DA, we take AEDA (Karimi et al., 2021), another strong DA, into comparison.
134
+
135
+ # 3.2.1 Experimental Settings
136
+
137
+ We conduct experiments on four benchmark datasets: Stanford Sentiment Treebank (SST-2, Socher et al., 2013); Customer Reviews Dataset (CR, Hu and Liu, 2004; Liu et al., 2015b), Subjectivity/Objectivity Dataset (SUBJ, Pang and Lee, 2004); Pros and Cons Dataset (PC, Ganapathibhotla and Liu, 2008). The detailed statistics are listed in Table F.5.
138
+
139
+ We chose Recurrent Neural Network (RNN, Liu et al., 2016), Convolutional Neural Network (CNN, Kim, 2014) and BERT (Devlin et al., 2019) as backbone models.
140
+
141
+ The data selection module has been modified to be close to real-life application scenarios. We select proportions of the original training data and then add the corresponding augmentations, so that only the visible data can be extended. Experimental setups are identical for all DA methods. All experiments are run with five different random seeds and reported as average performance. Training details are in Appendix C.
142
+
143
+ # 3.2.2 Main Results
144
+
145
+ We ran CNN, RNN and BERT across all four datasets using three DA methods. First, we added one augmented sentence for each instance to assess the effectiveness of single augmentation. We reported the average performance of all different operations in EDA and AMR-DA as final one aug
146
+
147
+ <table><tr><td>Model</td><td>CNN</td><td>RNN</td><td>BERT</td><td>Avg.</td></tr><tr><td>Original</td><td>88.15</td><td>86.49</td><td>93.19</td><td>89.28</td></tr><tr><td colspan="5">With 1 augmentation</td></tr><tr><td>+EDA</td><td>87.29</td><td>86.16</td><td>93.39</td><td>88.92</td></tr><tr><td>+AEDA</td><td>88.30</td><td>87.59</td><td>93.19</td><td>89.69</td></tr><tr><td>+AMR-DA</td><td>88.40</td><td>87.63</td><td>93.47</td><td>89.83</td></tr><tr><td colspan="5">With 5 augmentations</td></tr><tr><td>+EDA</td><td>87.75</td><td>86.37</td><td>93.29</td><td>89.14</td></tr><tr><td>+AEDA</td><td>88.78</td><td>87.21</td><td>93.53</td><td>89.84</td></tr><tr><td>+AMR-DA</td><td>88.80</td><td>88.00</td><td>93.54</td><td>90.11</td></tr></table>
148
+
149
+ Table 4: Average performance of CNN, RNN and BERT trained on original, EDA, AEDA and AMRDA (with 1 or 5 augmentations for each instance) data across all datasets.
150
+
151
+ mentation performance. As the top part of Table 4 shows, the average improvement of AMR-DA on the three models is $0.55\%$ , which is $0.91\%$ better than EDA and $0.14\%$ better than AEDA. How about using all operations to augment data in the training process? To answer this question, we added the augmentations of each operation together in AMR-DA and trained models with all five augmentations. Correspondingly, we randomly selected five augmentations using AEDA and EDA operations. We report the average performance in the bottom part of Table 4. AMR-DA achieved a $0.83\%$ performance gain with five augmentations over one augmentation, which means our operations bring diversified information to improve models. Regarding the effectiveness of the operations (SR, RI, RS and RD), we made a detailed comparison of EDA and AMR-DA. Figure 3 shows that AMR-DA outperforms EDA remarkably on various fractions of the training set.
152
+
153
+ ![](images/04e6057249bff66879eb58a788283d436a9f385b19476377e2202cd792aa501d.jpg)
154
+ Figure 3: Average performance of RNN model trained on different proportions of original, EDA and AMRDA training data for four datasets.
155
+
156
+ # 4 Analysis
157
+
158
+ Effect of AMR Generators From the introduction in Section 2.1, paraphrased sentences correspond to the identical AMR graph. In other words, AMR graph to sentences is a one-to-many relationship. Since there is no uniform evaluation of AMR generators, it is necessary to study the impact of AMR generators on the performance of AMR-DA. We compared AMR-Ori with various generators based on BART<sub>base</sub>, T5<sub>small</sub> and T5<sub>base</sub>. Table 5 shows the comparison of PLMs-generators. We found that pretrained models with larger sizes are capable of generating higher-quality augmentations. So we choose the AMR generator with T5<sub>base</sub> as the final generator in AMR-DA.
159
+
160
+ <table><tr><td>Model</td><td>Avg.</td></tr><tr><td>BERTbase-flow‡</td><td>66.55</td></tr><tr><td>SimCSE-BERTbase‡</td><td>76.25</td></tr><tr><td>AMR(BARTbase generator)-SimCSE</td><td>77.81</td></tr><tr><td>AMR(T5small generator)-SimCSE</td><td>77.65</td></tr><tr><td>AMR(T5base generator)-SimCSE</td><td>77.95</td></tr></table>
161
+
162
+ Table 5: Performance of AMR-DA (Ori) in STS tasks with various generators. ‡: results from Gao et al., 2021; §: results from Wu et al., 2021.
163
+
164
+ Why does AMR-DA work in STS task? To answer this question, we use alignment and uniformity, which are proposed by (Wang and Isola, 2020) to measure the quality of representations. Alignment calculates how close the positive instances stay, while uniformity evaluates how uniformly the random instances are scattered on the hypersphere. For both metrics, lower numbers are better. We take the checkpoint of SimCSE and AMR-SimCSE every 10 steps during training (100 steps
165
+
166
+ ![](images/43f52c2c2eefc899d311918ed9e85ed91eaf571c1d2dca6868d929e0a021470f.jpg)
167
+ Figure 4: Alignment-uniformity plot on STSB dataset.
168
+
169
+ in total) and visualize the alignment and uniformity computed on STSB dataset. Figure 4 demonstrates that both SimCSE and AMR-SimCSE improve the uniformity steadily. Additionally, AMR-SimCSE provides a continuously decreasing alignment. It verifies our hypothesis that data augmentation with different syntax but highly related semantics results in better sentence embeddings.
170
+
171
+ Analysis of Generated Outputs To analyze generated outputs by back-translation and AMR-Ori, we use supervised SimCSE-RoBERTa<sub>large</sub>, which achieves the state-of-the-art performance on various semantic textual similarity benchmarks, to compute the sentence embedding cosine similarity between the generated sentences and the original ones. Figure 5 summarizes the results. First we can see
172
+
173
+ ![](images/2107b5d84f581d301bc2b9c336ea820c2dbe015c4ad046402429ee55b7e707cf.jpg)
174
+ Figure 5: Semantic similarity scores of backtranslation and AMR-Ori augmentations (data from Table 3).
175
+
176
+ that for both AMR-Ori and back-translation, their generated sentences have high similarity scores with the original sentences. However, AMR-Ori generates much more diversified outputs. For back-translation, more than $30\%$ of the generated sentences have the similarity score of 1.0 (highest) with their original sentences, and more than $50\%$ of them have the similarity score of 0.99 or above. In contrast, the distribution for AMR-Ori is more uniform: the highest frequency rate, about $10\%$ , is at the similarity score of 0.97.
177
+
178
+ We also computed the F1 scores measured between two bags of words. We find that the overlap score of back-translation method is 0.760, compared to 0.566 for AMR-Ori (evaluated using unsupervised SimCSE experiment data in Table 3).
179
+
180
+ For illustration, we list some examples of back-translation and AMR-Ori in Table 6 and more in Table D.3 in the appendix. One could see that back-translation paraphrases source sentences with little
181
+
182
+ <table><tr><td>Source</td><td>IDS Tirana is a football club based in Tirana, Albania.</td></tr><tr><td>Back Translation</td><td>IDS Tirana is a football club from Tirana, Albania.</td></tr><tr><td>AMR-Ori</td><td>The football club IDS Tirana is based in Tirana, Albania.</td></tr><tr><td>Source</td><td>The library was established through the philanthropy of Martha Bayard Stevens.</td></tr><tr><td>Back Translation</td><td>The library was founded through the philanthropy of Martha Bayard Stevens.</td></tr><tr><td>AMR-Ori</td><td>Martha Bayard Stevens philanthropy has established a library.</td></tr><tr><td>Source</td><td>A meeting of promoters was also held at Presbyterian Church.</td></tr><tr><td>Back Translation</td><td>A meeting of the project promoters was also held in the Presbyterian Church.</td></tr><tr><td>AMR-Ori</td><td>The promoters also held a meeting at the Presbyterian Church.</td></tr></table>
183
+
184
+ Table 6: Augmented examples generated by back-translation and AMR-Ori (no edits on intermediate AMR graphs) from source sentences.
185
+
186
+ modification. On the other hand, AMR-Ori can produce quite different sentences even though it does not modify the intermediate representations. A key factor is that AMR graphs abstract away from syntactic idiosyncrasies while retaining semantic frame arguments.
187
+
188
+ Finally, Table D.4 in the appendix lists some example outputs from EDA and AMR-DA. The original sentence is the same as EDA-None. Except for the pair of EDA-None and AMR-Ori, the outputs generated by AMR-DA are more fluent than their corresponding outputs from EDA.
189
+
190
+ # 5 Related Work
191
+
192
+ Our proposed data augmentation method is based on manipulating AMR graphs. Similar tree-edit techniques on syntax trees have been found to be useful in paraphrases generation (Heilman and Smith, 2010; Vila and Dras, 2012). Other applications of AMR have also been based on graph manipulation. For example, Liu et al. (2015a) used AMR in summarization task by first parsing the source text to a set of graphs, transforming it to a summary graph, and then generating a summary using the summary graph. Sachan and Xing (2016) represented text and questions as AMR graphs and reduced the machine comprehension problem to a graph containment problem. We have seen a growing body of work that makes use of AMR in other applications such as dialogue modeling, information extraction and commonsense reasoning (Bai et al., 2021; Zhang et al., 2021; Lim et al., 2020).
193
+
194
+ Based on the influence scope of augmentation, related data augmentation methods can be roughly classified into token-level and sentence-level methods (Chen et al., 2021).
195
+
196
+ In token-level, synonyms replacement, random
197
+
198
+ swap, random insertion, random deletion (Zhang et al., 2015; Wei and Zou, 2019) have been proven to improve the performance in classification tasks. In STS task, plenty of data augmentation techniques have been utilized such as shuffling, cutoff (Yan et al., 2021), synonyms replace (Wang et al., 2021), word repetition (Wu et al., 2021), etc. However, these methods all risk impairing structure information, resulting in incoherent augmentations.
199
+
200
+ In contrast, sentence-level methods take the whole sentence into consideration. Widely used back translation (Sennrich et al., 2016; Edunov et al., 2018b; Qu et al., 2021) translates sentences into intermediate languages and then translates back. Some studies attempt to incorporate syntactic information (Chen et al., 2019) or latent variables (Gupta et al., 2018) to guide generators to synthesize various augmentations. But these methods significantly increase the training difficulty. AMR-DA uses AMR as an intermediate language, which allows graphs to be modified as easily as in token-level methods, and synthesizes high-quality and diversified augmentations without additional training burden.
201
+
202
+ # 6 Conclusion and Future Work
203
+
204
+ We propose a novel data augmentation method called AMR-DA. AMR-DA transduces sentences to AMR graphs, applies multiple strategies to modify graphs, and then generates diversified augmentations. To the best of our knowledge, this paper is the first work that utilizes AMR for data augmentation. AMR-DA overcomes the deficiency of previous sentence-level generation methods and diversifies augmentations without retraining decoders. Our experiments show that AMR-DA boosts the performance of models to achieve state-of-the-art results in several STS benchmarks and outperforms
205
+
206
+ EDA and AEDA in text classification tasks. In this paper, we mainly use AMR-DA to generate positive augmentations. Further research could use AMR-DA to carefully construct adversarial samples for specific tasks.
207
+
208
+ # References
209
+
210
+ Eneko Agirre, Carmen Banea, Claire Cardie, Daniel M. Cer, Mona T. Diab, Aitor Gonzalez-Agirre, Weiwei Guo, Inigo Lopez-Gazpio, Montse Maritxalar, Rada Mihalcea, German Rigau, Larraitz Uria, and Janyce Wiebe. 2015. Semeval-2015 task 2: Semantic textual similarity, english, spanish and pilot on interpretability. In Proceedings of the 9th International Workshop on Semantic Evaluation, SemEval@NAACL-HLT 2015, Denver, Colorado, USA, June 4-5, 2015, pages 252-263. The Association for Computer Linguistics.
211
+ Eneko Agirre, Carmen Banea, Claire Cardie, Daniel M. Cer, Mona T. Diab, Aitor Gonzalez-Agirre, Weiwei Guo, Rada Mihalcea, German Rigau, and Janyce Wiebe. 2014. Semeval-2014 task 10: Multilingual semantic textual similarity. In Proceedings of the 8th International Workshop on Semantic Evaluation, SemEval@COLING 2014, Dublin, Ireland, August 23-24, 2014, pages 81-91. The Association for Computer Linguistics.
212
+ Eneko Agirre, Carmen Banea, Daniel M. Cer, Mona T. Diab, Aitor Gonzalez-Agirre, Rada Mihalcea, German Rigau, and Janyce Wiebe. 2016. Semeval-2016 task 1: Semantic textual similarity, monolingual and cross-lingual evaluation. In Proceedings of the 10th International Workshop on Semantic Evaluation, SemEval@NAACL-HLT 2016, San Diego, CA, USA, June 16-17, 2016, pages 497-511. The Association for Computer Linguistics.
213
+ Eneko Agirre, Daniel M. Cer, Mona T. Diab, and Aitor Gonzalez-Agirre. 2012. Semeval-2012 task 6: A pilot on semantic textual similarity. In Proceedings of the 6th International Workshop on Semantic Evaluation, SemEval@NAACL-HLT 2012, Montreal, Canada, June 7-8, 2012, pages 385-393. The Association for Computer Linguistics.
214
+ Eneko Agirre, Daniel M. Cer, Mona T. Diab, Aitor Gonzalez-Agirre, and Weiwei Guo. 2013. *sem 2013 shared task: Semantic textual similarity. In Proceedings of the Second Joint Conference on Lexical and Computational Semantics, *SEM 2013, June 13-14, 2013, Atlanta, Georgia, USA, pages 32-43. Association for Computational Linguistics.
215
+ Xuefeng Bai, Yulong Chen, Linfeng Song, and Yue Zhang. 2021. Semantic representation for dialogue modeling. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers),
216
+
217
+ pages 4430-4445, Online. Association for Computational Linguistics.
218
+ Laura Banarescu, Claire Bonial, Shu Cai, Madalina Georgescu, Kira Griffitt, Ulf Hermjakob, Kevin Knight, Philipp Koehn, Martha Palmer, and Nathan Schneider. 2013. Abstract meaning representation for sembanking. In Proceedings of the 7th linguistic annotation workshop and interoperability with discourse, pages 178-186.
219
+ Michele Bevilacqua, Rexhina Blloshmi, and Roberto Navigli. 2021. One spring to rule them both: Symmetric amr semantic parsing and generation without a complex pipeline. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 12564-12573.
220
+ Daniel M. Cer, Mona T. Diab, Eneko Agirre, Inigo Lopez-Gazpio, and Lucia Specia. 2017. Semeval-2017 task 1: Semantic textual similarity multilingual and crosslingual focused evaluation. In Proceedings of the 11th International Workshop on Semantic Evaluation, SemEval@ACL 2017, Vancouver, Canada, August 3-4, 2017, pages 1-14. Association for Computational Linguistics.
221
+ Jiaao Chen, Derek Tam, Colin Raffel, Mohit Bansal, and Diyi Yang. 2021. An empirical survey of data augmentation for limited data learning in nlp. arXiv preprint arXiv:2106.07499.
222
+ Mingda Chen, Qingming Tang, Sam Wiseman, and Kevin Gimpel. 2019. Controllable paraphrase generation with a syntactic exemplar. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 5972-5984.
223
+ Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey E. Hinton. 2020. A simple framework for contrastive learning of visual representations. In Proceedings of the 37th International Conference on Machine Learning, ICML 2020, 13-18 July 2020, Virtual Event, volume 119 of Proceedings of Machine Learning Research, pages 1597-1607. PMLR.
224
+ Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186.
225
+ Sergey Edunov, Myle Ott, Michael Auli, and David Grangier. 2018a. Understanding back-translation at scale. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, Brussels, Belgium, October 31 - November 4, 2018, pages 489-500. Association for Computational Linguistics.
226
+ Sergey Edunov, Myle Ott, Michael Auli, and David Grangier. 2018b. Understanding back-translation at
227
+
228
+ scale. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 489-500, Brussels, Belgium. Association for Computational Linguistics.
229
+ Murthy Ganapathibhotla and Bing Liu. 2008. Mining opinions in comparative sentences. In Proceedings of the 22nd International Conference on Computational Linguistics (Coling 2008), pages 241-248.
230
+ Tianyu Gao, Xingcheng Yao, and Danqi Chen. 2021. SimCSE: Simple contrastive learning of sentence embeddings. In Empirical Methods in Natural Language Processing (EMNLP).
231
+ Michael Wayne Goodman. 2020. Penman: An open-source library and tool for AMR graphs. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: System Demonstrations, ACL 2020, Online, July 5-10, 2020, pages 312-319. Association for Computational Linguistics.
232
+ Ankush Gupta, Arvind Agarwal, Prawaan Singh, and Piyush Rai. 2018. A deep generative framework for paraphrase generation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32.
233
+ Michael Heilman and Noah A Smith. 2010. Tree edit models for recognizing textual entailments, paraphrases, and answers to questions. In Human Language Technologies: The 2010 Annual Conference of the North American Chapter of the Association for Computational Linguistics, pages 1011-1019.
234
+ Minqing Hu and Bing Liu. 2004. Mining and summarizing customer reviews. In Proceedings of the tenth ACM SIGKDD international conference on Knowledge discovery and data mining, pages 168-177.
235
+ Akbar Karimi, Leonardo Rossi, and Andrea Prati. 2021. Aeda: An easier data augmentation technique for text classification.
236
+ Yoon Kim. 2014. Convolutional neural networks for sentence classification. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1746-1751, Doha, Qatar. Association for Computational Linguistics.
237
+ Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings.
238
+ Paul Kingsbury and Martha Palmer. 2002. From TreeBank to PropBank. In Proceedings of the Third International Conference on Language Resources and Evaluation (LREC'02), Las Palmas, Canary Islands - Spain. European Language Resources Association (ELRA).
239
+
240
+ Varun Kumar, Ashutosh Choudhary, and Eunah Cho. 2020. Data augmentation using pre-trained transformer models. In Proceedings of the 2nd Workshop on Life-long Learning for Spoken Language Systems, pages 18-26. Association for Computational Linguistics.
241
+ Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. BART: Denoising sequence-to-sequence pretraining for natural language generation, translation, and comprehension. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7871-7880, Online. Association for Computational Linguistics.
242
+ Bohan Li, Yutai Hou, and Wanxiang Che. 2021. Data augmentation approaches in natural language processing: A survey. arXiv preprint arXiv:2110.01852.
243
+ Jungwoo Lim, Dongsuk Oh, Yoonna Jang, Kisu Yang, and Heui-Seok Lim. 2020. I know what you asked: Graph path learning using amr for commonsense reasoning. In Proceedings of the 28th International Conference on Computational Linguistics, pages 2459-2471.
244
+ Fei Liu, Jeffrey Flanigan, Sam Thomson, Norman Sadeh, and Noah A. Smith. 2015a. Toward abstractive summarization using semantic representations. In Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 1077-1086, Denver, Colorado. Association for Computational Linguistics.
245
+ Pengfei Liu, Xipeng Qiu, and Xuanjing Huang. 2016. Recurrent neural network for text classification with multi-task learning. In Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence, IJCAI'16, page 2873-2879. AAAI Press.
246
+ Qian Liu, Zhiqiang Gao, Bing Liu, and Yuanlin Zhang. 2015b. Automated rule selection for aspect extraction in opinion mining. In Twenty-Fourth international joint conference on artificial intelligence.
247
+ Sisi Liu, Kyungmi Lee, and Ickjai Lee. 2020. Document-level multi-topic sentiment classification of email data with bilstm and data augmentation. Knowledge-Based Systems, 197:105918.
248
+ Manuel Mager, Ramón Fernandez Astudillo, Tahira Naseem, Md Arafat Sultan, Young-Suk Lee, Radu Florian, and Salim Roukos. 2020. GPT-too: A language-model-first approach for AMR-to-text generation. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 1846-1852, Online. Association for Computational Linguistics.
249
+ Marco Marelli, Stefano Menini, Marco Baroni, Luisa Bentivogli, Raffaella Bernardi, Roberto Zamparelli,
250
+
251
+ et al. 2014. A SICK cure for the evaluation of compositional distributional semantic models. In LREC, pages 216-223, Reykjavik.
252
+ Nathan Ng, Kyra Yee, Alexei Baevski, Myle Ott, Michael Auli, and Sergey Edunov. 2019. Facebook FAIR's WMT19 news translation task submission. In Proceedings of the Fourth Conference on Machine Translation (Volume 2: Shared Task Papers, Day 1), pages 314-319, Florence, Italy. Association for Computational Linguistics.
253
+ Bo Pang and Lillian Lee. 2004. A sentimental education: Sentiment analysis using subjectivity summarization based on minimum cuts. In Proceedings of the 42nd Annual Meeting of the Association for Computational Linguistics (ACL-04), pages 271-278.
254
+ Ellie Pavlick, Pushpendre Rastogi, Juri Ganitkevitch, Benjamin Van Durme, and Chris Callison-Burch. 2015. Ppdb 2.0: Better paraphrase ranking, fine-grained entailment relations, word embeddings, and style classification. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (Volume 2: Short Papers), pages 425-430.
255
+ Luis Perez and Jason Wang. 2017. The effectiveness of data augmentation in image classification using deep learning. arXiv preprint arXiv:1712.04621.
256
+ Yanru Qu, Dinghan Shen, Yelong Shen, Sandra Sajeev, Weizhu Chen, and Jiawei Han. 2021. Coda: Contrast-enhanced and diversity-promoting data augmentation for natural language understanding. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net.
257
+ Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. 2019. Exploring the limits of transfer learning with a unified text-to-text transformer. arXiv preprint arXiv:1910.10683.
258
+ Leonardo FR Ribeiro, Martin Schmitt, Hinrich Schütze, and Iryna Gurevych. 2021. Investigating pretrained language models for graph-to-text generation. In Proceedings of the 3rd Workshop on Natural Language Processing for Conversational AI.
259
+ Mrinmaya Sachan and Eric Xing. 2016. Machine comprehension using rich semantic representations. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 486-492, Berlin, Germany. Association for Computational Linguistics.
260
+ Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Improving neural machine translation models with monolingual data. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 86-96.
261
+
262
+ Richard Socher, John Bauer, Christopher D Manning, and Andrew Y Ng. 2013. Parsing with compositional vector grammars. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 455-465.
263
+ Marta Vila and Mark Dras. 2012. Tree edit distance as a baseline approach for paraphrase representation. Procesamiento del Lenguaje Natural, 48:89-95.
264
+ Dong Wang, Ning Ding, Piji Li, and Haitao Zheng. 2021. CLINE: Contrastive learning with semantic negative examples for natural language understanding. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 2332-2342, Online. Association for Computational Linguistics.
265
+ Tongzhou Wang and Phillip Isola. 2020. Understanding contrastive representation learning through alignment and uniformity on the hypersphere. In Proceedings of the 37th International Conference on Machine Learning, ICML 2020, 13-18 July 2020, Virtual Event, volume 119 of Proceedings of Machine Learning Research, pages 9929-9939. PMLR.
266
+ William Yang Wang and Diyi Yang. 2015. That's so annoying!!!: A lexical and frame-semantic embedding based data augmentation approach to automatic categorization of annoying behaviors using# petpeeve tweets. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 2557-2563.
267
+ Jason Wei and Kai Zou. 2019. Eda: Easy data augmentation techniques for boosting performance on text classification tasks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 6382-6388.
268
+ Xing Wu, Chaochen Gao, Liangjun Zang, Jizhong Han, Zhongyuan Wang, and Songlin Hu. 2021. Esimcse: Enhanced sample building method for contrastive learning of unsupervised sentence embedding. CoRR, abs/2109.04380.
269
+ Yuanmeng Yan, Rumei Li, Sirui Wang, Fuzheng Zhang, Wei Wu, and Weiran Xu. 2021. Consert: A contrastive framework for self-supervised sentence representation transfer. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing, ACL/IJCNLP 2021, Virtual Event, August 1-6, 2021, pages 5065-5075. Association for Computational Linguistics.
270
+ Yiben Yang, Chaitanya Malaviya, Jared Fernandez, Swabha Swayamdipta, Ronan Le Bras, Ji-Ping Wang, Chandra Bhagavatula, Yejin Choi, and Doug
271
+
272
+ Downey. 2020. Generative data augmentation for commonsense reasoning. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 1008-1025, Online. Association for Computational Linguistics.
273
+
274
+ Xiang Zhang, Junbo Zhao, and Yann LeCun. 2015. Character-level convolutional networks for text classification. Advances in neural information processing systems, 28:649-657.
275
+
276
+ Zixuan Zhang, Nikolaus Parulian, Heng Ji, Ahmed Elsayed, Skatje Myers, and Martha Palmer. 2021. Fine-grained information extraction from biomedical literature based on knowledge-enriched abstract meaning representation. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 6261-6270.
277
+
278
+ # A Discarding pairs in RI operation
279
+
280
+ We filter pairs based on edge properties. The discarded edges are listed in the following table.
281
+
282
+ <table><tr><td>Edge</td><td>Reasons</td></tr><tr><td>:ARGn</td><td>potential ambiguity of arguments</td></tr><tr><td>:polarity</td><td>convert the polarity of semantics</td></tr><tr><td>:wiki</td><td>Unsuitable for most graphs</td></tr><tr><td>:opn</td><td>Unsuitable for most graphs</td></tr><tr><td>:sntn</td><td>Unsuitable for most graphs</td></tr><tr><td>:value</td><td>Unsuitable for most graphs</td></tr></table>
283
+
284
+ # B STS tasks Training Details
285
+
286
+ For AMR-SimCSE, grid-search of batch size $\in$ {64,96,128,160} and learning rate $\in$ {5e-6, 1e-5, 3e-5, 5e-5} is carried out on STS-B development set, and the hyperparameter settings are listed in Table B.1. The dropout rate is set to 0.1 for base models and 0.15 for large models. We use the temperature $\tau = 0.05$ for all the experiments. During training, we found that a larger maximum sequence length equal to 96 benefits our AMR-SimCSE, while in SimCSE the value is 32. So we also enlarge the maximum sequence length to 96 in SimCSE but do not observe any improvement.
287
+
288
+ <table><tr><td></td><td colspan="2">BERT</td><td colspan="2">RoBERTa</td></tr><tr><td></td><td>base</td><td>large</td><td>base</td><td>large</td></tr><tr><td>Batch size</td><td>96</td><td>128</td><td>160</td><td>96</td></tr><tr><td>Learning rate</td><td>3e-5</td><td>3e-5</td><td>5e-5</td><td>5e-6</td></tr></table>
289
+
290
+ For AMR-ConSERT, we use hyperparameter settings that are the same as the original paper.
291
+
292
+ # C Text Classification Training Details
293
+
294
+ For CNN models, we followed the architecture in EDA and modified the filters. The entire architecture of our CNN: input layer; the concatenation of 1D convolutional layers of 128 filters of size 3, 4 and 5, with a global 1D max pool layer for each convolutional layer; dropout layer with $\rho = 0.2$ ; dense layer of 20 hidden units with ReLU activation function; softmax output layer. Other CNN settings and RNN settings are identical to EDA. As for BERT experiments, we adopt the base, uncased version of BERT as the backbone and the Adam optimizer (Kingma and Ba, 2015) with an initial learning rate of 2e-5. We pick the best checkpoint according to the validation loss. Random seeds are from 0 to 4. The default alpha settings for the four operations are listed in the following table:
295
+
296
+ Table B.1: Hyperparameters for AMR-SimCSE.
297
+
298
+ <table><tr><td></td><td>RS</td><td>RD</td><td>RI</td><td>SR</td></tr><tr><td>α</td><td>0.05</td><td>0.1</td><td>0.05</td><td>0.1</td></tr></table>
299
+
300
+ Table C.2: Setting of $\alpha$ for four different operations.
301
+
302
+ # D Comparison on Data Augmentation Outputs
303
+
304
+ More examples of generated outputs from back-translation and AMR-Ori are presented in Table D.3. Augmented examples using EDA and AMR-DA are presented in Table D.4.
305
+
306
+ # E Effect of alpha in Augmentation Operations
307
+
308
+ We test each operation individually for different training set sizes to determine its effectiveness with $\alpha = 0.05, 0.1, 0.2, 0.3, 0.4, 0.5$. For each value, we randomly synthesized two augmentations and ran CNN models in this experiment. As shown in Figure 6, all operations in AMR-DA contribute to performance gains. On average, the operations achieve more significant gains on smaller datasets.
309
+
310
+ # F Detailed Experimental Results
311
+
312
+ Table F.6 and F.7 are detailed versions of Table 3. Table F.8 is the detailed version of Table 5. Table F.9 is the detailed version of Table 1. Table F.10 is the detailed version of Table 4.
313
+
314
+ <table><tr><td>Source</td><td>IDS Tirana is a football club based in Tirana, Albania.</td></tr><tr><td>BT</td><td>IDS Tirana is a football club from Tirana, Albania.</td></tr><tr><td>AMR-Ori</td><td>The football club IDS Tirana is based in Tirana, Albania.</td></tr><tr><td>Source</td><td>The library was established through the philanthropy of Martha Bayard Stevens.</td></tr><tr><td>BT</td><td>The library was founded through the philanthropy of Martha Bayard Stevens.</td></tr><tr><td>AMR-Ori</td><td>Martha Bayard Stevens philanthropy has established a library.</td></tr><tr><td>Source</td><td>A meeting of promoters was also held at Presbyterian Church.</td></tr><tr><td>BT</td><td>A meeting of the project promoters was also held in the Presbyterian Church.</td></tr><tr><td>AMR-Ori</td><td>The promoters also held a meeting at the Presbyterian Church.</td></tr><tr><td>Source</td><td>He died suddenly on his way home from work on 23 December 1970.</td></tr><tr><td>BT</td><td>On December 23, 1970, he died suddenly on his way home from work.</td></tr><tr><td>AMR-Ori</td><td>On 23 December 1970, when he went home from work, he suddenly died.</td></tr><tr><td>Source</td><td>Supported by a senior leadership team he assembled he took the organization from near insolvency to financial security and a higher level of service delivery.</td></tr><tr><td>BT</td><td>Supported by a management team he assembled, he led the organization from near bankruptcy to financial security and improved service delivery.</td></tr><tr><td>AMR-Ori</td><td>With the support of his assembled senior leadership team, he took the organization from near non-financial security to higher levels of service delivery.</td></tr><tr><td>Source</td><td>Malaika Arora, Geeta Kapoor, and Terence Lewis is going to Judge of Sony TV&#x27;s dance reality show India&#x27;s Best Dancer.</td></tr><tr><td>BT</td><td>Malaika Arora, Geeta Kapoor and Terence Lewis will be the judges of Sony TV&#x27;s dance reality show 
India&#x27;s Best Dancer.</td></tr><tr><td>AMR-Ori</td><td>Malaika Arora, Geeta Kapoor and Terence Lewis are judges for Sony TV &#x27;s dance reality show Best Dancer.</td></tr><tr><td>Source</td><td>The Yurts lay the foundation for the whole philosophy of family relationships to which nomadic societies have always attached significant importance.</td></tr><tr><td>BT</td><td>The yurts form the basis of the whole philosophy of family relations, to which nomadic societies have always attached great importance.</td></tr><tr><td>AMR-Ori</td><td>The whole philosophy of family relationships, which nomad societies always attach significant importance, was laid by the Yurts.</td></tr><tr><td>Source</td><td>From then on, I went through different adventures and endangered my life many times.</td></tr><tr><td>BT</td><td>From then on, I experienced various adventures and was in danger of my life many times.</td></tr><tr><td>AMR-Ori</td><td>From then on, I have gone through different adventures, and have put my life in danger many times.</td></tr><tr><td>Source</td><td>Comedian Bharti Singh will Host this show along with her husband writer Haarsh Limbachiyaa.</td></tr><tr><td>BT</td><td>Comedian Bharti Singh will host the show with her husband, writer Haarsh Lim-bachiyyaa.</td></tr><tr><td>AMR-Ori</td><td>This show will be hosted by comedian Bharti Singh&#x27;s husband, writer Haarsh lim-bachiyyaa.</td></tr></table>
315
+
316
+ Table D.3: Sentences generated using back-translation and using AMR-Ori. BT: back-translation
317
+
318
+ <table><tr><td>Operation</td><td>EDA</td><td>AMR-DA</td></tr><tr><td>None</td><td>A sad, superior human comedy played out on the back roads of life.</td><td>The superior human sad comedy plays out on the back road of life.</td></tr><tr><td>SR</td><td>A lamentable, superior human comedy played out on the backward road of life.</td><td>A top human regrettable comedy plays out on the backroads of life.</td></tr><tr><td>RI</td><td>A sad, superior human comedy played out on funniness the back roads of life.</td><td>The superior human sad comedy of warmth plays out on the back road of life.</td></tr><tr><td>RS</td><td>A sad, superior human comedy played out on roads back the of life.</td><td>The superior human back comedy plays out on the sad road of life.</td></tr><tr><td>RD</td><td>A sad, superior human comedy played out on the baek roads of life.</td><td>The sad superior human comedy plays out on the baek road of life.</td></tr><tr><td>None</td><td>the solid filmmaking and convincing characters makes this a high water mark for this genre.</td><td>Solid filmthings and convincing characters make this a high - watermark for these genera.</td></tr><tr><td>SR</td><td>the solid filmmaking and convert characters makes this a high water mark for this genre</td><td>Solid motion pictures and convincing characters make these high - watermarks for this genre.</td></tr><tr><td>RI</td><td>in high spirits the solid filmmaking and convincing characters makes this a high water mark for this genre.</td><td>This solid, entertaining filmthings, and convincing character, makes a high water mark for this genre.</td></tr><tr><td>RS</td><td>the solid filmmaking and convincing characters makes this a high water mark this genre for</td><td>This is a high water mark for this genre , with convincing characters and solid films.</td></tr><tr><td>RD</td><td>the solid filmmaking and convincing characters makes this a high water mark for this genre</td><td>Solid filsmsmaking and econvineing 
characters make a high water mark for this genre.</td></tr><tr><td>None</td><td>in addition, his album bat out of hell stayed nine years on the english charts, and sold more than 40 million copies worldwide.</td><td>And his album, Bat Out of Hell, has stayed on the English charts for 9 years, and sold more than 40 million copies worldwide.</td></tr><tr><td>SR</td><td>in addition, his album lick out of hell stayed niner years on the english charts and sold more than 40 million replicate worldwide.</td><td>And his album &quot;Bat Out of Hell&quot; has stayed on the charts in England for 9 years and sold more than 40 million copies worldwide.</td></tr><tr><td>RI</td><td>holdup delay in addition, his more than album bat out of hell stayed nine years on the english charts, and sold more than 40 million copies worldwide.</td><td>And his album, Bat Out of Hell, has stayed on the charts in England correctly for 9 years, and sold more than 40 million copies worldwide .</td></tr><tr><td>RS</td><td>the addition, his album bat out of hell stayed nine years on in english charts and sold copies than million more worldwide.</td><td>And his album, Bat out of Hell, stayed at more than 40 million copies for 9 years, and sold worldwide on the chart in England.</td></tr><tr><td>RD</td><td>in addition, his album bat out of hell stayed nine years on the english charts, and sold more than 40 million copies worldwide.</td><td>And his album, Bat Out of Hell, has stayed on the English charts for a long time, sell-ing more than 40 million copies world-wide.</td></tr></table>
319
+
320
+ Table D.4: Sentences generated using EDA and using our data augmentation method AMR-DA. EDA returns the input sentence with "None" operation, while AMR-DA returns a paraphrased sentence. SR: synonym replacement. RI: random insertion. RS: random swap. RD: random deletion.
321
+
322
+ ![](images/1ac1175d25438661fe39912be8170ba9750b89a05116ae77a4fed997622545e8.jpg)
323
+
324
+ ![](images/fd61b2d4cd492349beb403c7737f2c0d21c0a17d928ada7c161f65a3e4fe1e1b.jpg)
325
+ (b) RD
326
+
327
+ ![](images/8caa2ddecc0d91c87afd9677ce1ea8e32b595351f02d03c69ff3e8e14e0c6ef6.jpg)
328
+ (a) RS
329
+ (c) RI
330
+
331
+ ![](images/c8cb946370f851509ee08b093016894a627fa23166cfa805e7df627dab00abcc.jpg)
332
+ (d) SR
333
+ Figure 6: Average performance gain of individual AMR-DA operations over four text classification datasets for different training set sizes. $\alpha$ roughly controls the range that the operation can impact in each augmentation.
334
+
335
+ <table><tr><td>Dataset</td><td># Classes</td><td># Train samples</td><td># Test samples</td><td>Average length</td><td>Vocabulary size</td></tr><tr><td>SST-2</td><td>2</td><td>7,791</td><td>1,821</td><td>19</td><td>15,771</td></tr><tr><td>CR</td><td>2</td><td>4,068</td><td>451</td><td>19</td><td>9,048</td></tr><tr><td>SUBJ</td><td>2</td><td>9,000</td><td>1,000</td><td>25</td><td>22,715</td></tr><tr><td>PC</td><td>2</td><td>40,000</td><td>26,090</td><td>7</td><td>26,090</td></tr></table>
336
+
337
+ Table F.5: Statistics of four text classification datasets.
338
+
339
+ <table><tr><td>Model</td><td>STS12</td><td>STS13</td><td>STS14</td><td>STS15</td><td>STS16</td><td>STS-B</td><td>SICK-R</td><td>Avg.</td></tr><tr><td>ConBERT-BERTbase†</td><td>64.64</td><td>78.49</td><td>69.07</td><td>79.72</td><td>75.95</td><td>73.97</td><td>67.31</td><td>72.74</td></tr><tr><td>AMR-ConBERT-BERTbase</td><td>71.98</td><td>81.96</td><td>72.91</td><td>82.00</td><td>76.31</td><td>77.00</td><td>70.85</td><td>76.14</td></tr><tr><td>ConBERT-BERTlarge†</td><td>70.69</td><td>82.96</td><td>74.13</td><td>82.78</td><td>76.66</td><td>77.53</td><td>70.37</td><td>76.45</td></tr><tr><td>AMR-ConBERT-BERTlarge</td><td>73.93</td><td>85.45</td><td>76.27</td><td>82.86</td><td>77.87</td><td>79.28</td><td>71.65</td><td>78.19</td></tr></table>
340
+
341
+ Table F.6: The performance comparison of ConSERT with AMR-ConSERT in the unsupervised setting. We report Spearman correlation magnified by a factor of 100 on all splits of seven STS datasets. †: results from Yan et al., 2021.
342
+
343
+ <table><tr><td>Model</td><td>STS12</td><td>STS13</td><td>STS14</td><td>STS15</td><td>STS16</td><td>STS-B</td><td>SICK-R</td><td>Avg.</td></tr><tr><td>SimCSE-BERTbase‡</td><td>68.40</td><td>82.41</td><td>74.38</td><td>80.91</td><td>78.56</td><td>76.85</td><td>72.23</td><td>76.25</td></tr><tr><td>ESimCSE-BERTbase§</td><td>73.40</td><td>83.27</td><td>73.83</td><td>82.66</td><td>78.81</td><td>80.17</td><td>72.30</td><td>78.27</td></tr><tr><td>AMR-SimCSE-BERTbase</td><td>72.51</td><td>83.40</td><td>75.91</td><td>83.35</td><td>79.70</td><td>78.94</td><td>71.86</td><td>77.95</td></tr><tr><td>SimCSE-BERTlarge‡</td><td>70.88</td><td>84.16</td><td>76.43</td><td>84.50</td><td>79.76</td><td>79.26</td><td>73.88</td><td>78.41</td></tr><tr><td>ESimCSE-BERTlarge§</td><td>73.21</td><td>85.37</td><td>77.73</td><td>84.30</td><td>78.92</td><td>80.73</td><td>74.89</td><td>79.31</td></tr><tr><td>AMR-SimCSE-BERTlarge</td><td>75.47</td><td>84.77</td><td>77.56</td><td>85.49</td><td>80.06</td><td>80.28</td><td>73.81</td><td>79.63</td></tr><tr><td>SimCSE-RoBERTa base‡</td><td>70.16</td><td>81.77</td><td>73.24</td><td>81.36</td><td>80.65</td><td>80.22</td><td>68.56</td><td>76.57</td></tr><tr><td>ESimCSE-RoBERTa base§</td><td>69.90</td><td>82.50</td><td>74.68</td><td>83.19</td><td>80.30</td><td>80.99</td><td>70.54</td><td>77.44</td></tr><tr><td>AMR-SimCSE-RoBERTa base</td><td>74.80</td><td>82.67</td><td>75.42</td><td>82.57</td><td>80.49</td><td>80.36</td><td>72.70</td><td>78.43</td></tr><tr><td>SimCSE-RoBERTalarge‡</td><td>72.86</td><td>83.99</td><td>75.62</td><td>84.77</td><td>81.80</td><td>81.98</td><td>71.26</td><td>78.90</td></tr><tr><td>ESimCSE-RoBERTalarge§</td><td>73.20</td><td>84.93</td><td>76.88</td><td>84.86</td><td>81.21</td><td>82.79</td><td>72.27</td><td>79.45</td></tr><tr><td>AMR-SimCSE-RoBERTalarge</td><td>74.35</td><td>84.72</td><td>77.32</td><td>85.90</td><td>81.77</td><td>81.07</td><td>72.76</td><td>79.70</td></tr></table>
344
+
345
+ Table F.7: The performance comparison of unsupervised SimCSE and its variants on seven STS test splits. The reported score is Spearman correlation magnified by a factor of 100. $\ddagger$ : results from Gao et al., 2021; $\S$ : results from Wu et al., 2021.
346
+
347
+ <table><tr><td>Model</td><td>STS12</td><td>STS13</td><td>STS14</td><td>STS15</td><td>STS16</td><td>STS-B</td><td>SICK-R</td><td>Avg.</td></tr><tr><td rowspan="2">BERTbase-flow†</td><td colspan="8">Using STS unlabeled texts</td></tr><tr><td>63.48</td><td>72.14</td><td>68.42</td><td>73.77</td><td>75.37</td><td>70.72</td><td>63.11</td><td>69.57</td></tr><tr><td>ConBERT-BERTbase†</td><td>64.64</td><td>78.49</td><td>69.07</td><td>79.72</td><td>75.95</td><td>73.97</td><td>67.31</td><td>72.74</td></tr><tr><td>+AMR-SR augmentation</td><td>71.33</td><td>78.37</td><td>71.99</td><td>83.34</td><td>75.24</td><td>76.89</td><td>72.62</td><td>75.68</td></tr><tr><td>+AMR-RD augmentation</td><td>64.31</td><td>80.69</td><td>71.87</td><td>81.73</td><td>76.76</td><td>75.78</td><td>69.28</td><td>74.34</td></tr><tr><td>+AMR-RI augmentation</td><td>67.40</td><td>79.24</td><td>71.35</td><td>82.56</td><td>76.07</td><td>77.31</td><td>73.22</td><td>75.31</td></tr><tr><td>+AMR-RS augmentation</td><td>72.01</td><td>82.19</td><td>72.94</td><td>81.93</td><td>76.15</td><td>77.24</td><td>70.31</td><td>76.11</td></tr><tr><td>+AMR-Ori augmentation</td><td>71.98</td><td>81.96</td><td>72.91</td><td>82.00</td><td>76.31</td><td>77.00</td><td>70.85</td><td>76.14</td></tr></table>
348
+
349
+ Table F.8: Performance comparison of models with different DA methods. †: results from Yan et al., 2021.
350
+
351
+ <table><tr><td>Model</td><td>STS12</td><td>STS13</td><td>STS14</td><td>STS15</td><td>STS16</td><td>STS-B</td><td>SICK-R</td><td>Avg.</td></tr><tr><td colspan="9">Using Wiki texts</td></tr><tr><td>BERTbase-flow‡</td><td>58.40</td><td>67.10</td><td>60.85</td><td>75.16</td><td>71.22</td><td>68.66</td><td>64.47</td><td>66.55</td></tr><tr><td>SimCSE-BERTbase‡</td><td>68.40</td><td>82.41</td><td>74.38</td><td>80.91</td><td>78.56</td><td>76.85</td><td>72.23</td><td>76.25</td></tr><tr><td>+word repetition§</td><td>69.79</td><td>83.43</td><td>75.65</td><td>82.44</td><td>79.43</td><td>79.44</td><td>71.86</td><td>77.43</td></tr><tr><td>+back translation</td><td>66.50</td><td>74.53</td><td>66.34</td><td>76.61</td><td>77.33</td><td>72.15</td><td>68.54</td><td>71.71</td></tr><tr><td>+AMR(BARTbase generator)-SimCSE</td><td>72.30</td><td>83.15</td><td>75.53</td><td>83.17</td><td>79.23</td><td>78.15</td><td>73.16</td><td>77.81</td></tr><tr><td>+AMR(T5small generator)-SimCSE</td><td>72.26</td><td>81.77</td><td>75.93</td><td>83.44</td><td>79.78</td><td>77.93</td><td>72.44</td><td>77.65</td></tr><tr><td>+AMR(T5base generator)-SimCSE</td><td>72.51</td><td>83.40</td><td>75.91</td><td>83.35</td><td>79.70</td><td>78.94</td><td>71.86</td><td>77.95</td></tr></table>
352
+
353
+ Table F.9: Performance comparison of AMR-DA (Ori) with different generators. $\ddagger$ : results from Gao et al., 2021; $\S$ : results from Wu et al., 2021.
354
+
355
+ <table><tr><td></td><td>CR</td><td>SST2</td><td>SUBJ</td><td>PC</td><td>Avg.</td></tr><tr><td>RNN</td><td>79.38</td><td>82.32</td><td>91.96</td><td>92.31</td><td>86.49</td></tr><tr><td>+EDA (num AUG=1)</td><td>80.95</td><td>82.04</td><td>91.43</td><td>90.22</td><td>86.16</td></tr><tr><td>+AEDA (num AUG=1)</td><td>82.22</td><td>82.86</td><td>92.56</td><td>92.70</td><td>87.59</td></tr><tr><td>+AMR-DA (num AUG=1)</td><td>81.70</td><td>83.37</td><td>92.68</td><td>92.76</td><td>87.63</td></tr><tr><td>+EDA (num AUG=5)</td><td>80.93</td><td>82.99</td><td>91.14</td><td>90.42</td><td>86.37</td></tr><tr><td>+AEDA (num AUG=5)</td><td>80.53</td><td>83.10</td><td>92.62</td><td>92.59</td><td>87.21</td></tr><tr><td>+AMR-DA (num AUG=5)</td><td>82.93</td><td>83.74</td><td>92.72</td><td>92.60</td><td>88.00</td></tr><tr><td>CNN</td><td>83.68</td><td>84.28</td><td>91.84</td><td>92.79</td><td>88.15</td></tr><tr><td>+EDA (num AUG=1)</td><td>82.90</td><td>83.62</td><td>91.51</td><td>90.79</td><td>87.20</td></tr><tr><td>+AEDA (num AUG=1)</td><td>83.55</td><td>84.50</td><td>92.48</td><td>92.65</td><td>88.30</td></tr><tr><td>+AMR-DA (num AUG=1)</td><td>83.85</td><td>84.68</td><td>92.38</td><td>92.70</td><td>88.40</td></tr><tr><td>+EDA (num AUG=5)</td><td>83.59</td><td>84.12</td><td>91.90</td><td>91.40</td><td>87.75</td></tr><tr><td>+AEDA (num AUG=5)</td><td>84.75</td><td>85.11</td><td>92.68</td><td>92.59</td><td>88.78</td></tr><tr><td>+AMR-DA (num AUG=5)</td><td>85.05</td><td>84.94</td><td>92.54</td><td>92.67</td><td>88.80</td></tr><tr><td>BERT</td><td>89.67</td><td>90.72</td><td>96.38</td><td>95.98</td><td>93.19</td></tr><tr><td>+EDA (num AUG=1)</td><td>90.73</td><td>91.22</td><td>95.88</td><td>95.74</td><td>93.39</td></tr><tr><td>+AEDA (num AUG=1)</td><td>90.15</td><td>90.42</td><td>96.26</td><td>95.94</td><td>93.19</td></tr><tr><td>+AMR-DA (num AUG=1)</td><td>90.53</td><td>90.90</td><td>96.52</td><td>95.92</td><td>93.47</td></tr><tr><td>+EDA (num 
AUG=5)</td><td>89.80</td><td>91.76</td><td>95.70</td><td>95.88</td><td>93.29</td></tr><tr><td>+AEDA (num AUG=5)</td><td>90.01</td><td>91.71</td><td>96.50</td><td>95.89</td><td>93.53</td></tr><tr><td>+AMR-DA (num AUG=5)</td><td>90.47</td><td>91.02</td><td>96.70</td><td>95.97</td><td>93.54</td></tr></table>
356
+
357
+ Table F.10: Average performance of CNN, RNN and BERT on four classification datasets.
amrdadataaugmentationbyabstractmeaningrepresentation/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:613bab20892ea1b1d1dc8a32e7a72e42d6fc0f8792fcff140d738932f8af76bd
3
+ size 1848641
amrdadataaugmentationbyabstractmeaningrepresentation/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b4f3885599fe3f3d723526affb95282a8a1b722be2d406c063780960045caed9
3
+ size 431949
anaccurateunsupervisedmethodforjointentityalignmentanddanglingentitydetection/61085cb9-f4a8-41f8-a8c2-c428a475f780_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a536fd5d572d459f08b19ad8428e3884a8117ca73a5548204c93a496d119fa91
3
+ size 74172
anaccurateunsupervisedmethodforjointentityalignmentanddanglingentitydetection/61085cb9-f4a8-41f8-a8c2-c428a475f780_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:57387d8c6874374019bd3ffb78fa3b7212e0a5a22e737efdd25b9bb96400c3b9
3
+ size 86964
anaccurateunsupervisedmethodforjointentityalignmentanddanglingentitydetection/61085cb9-f4a8-41f8-a8c2-c428a475f780_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:07f3e05d0aedf10bafab1a9f242a0114880dd912c943d115fd9c888e3db0febb
3
+ size 738186
anaccurateunsupervisedmethodforjointentityalignmentanddanglingentitydetection/full.md ADDED
@@ -0,0 +1,298 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # An Accurate Unsupervised Method for Joint Entity Alignment and Dangling Entity Detection
2
+
3
+ Shengxuan Luo $^{1,2}$ Sheng Yu $^{1,2}$
4
+
5
+ <sup>1</sup>Center for Statistical Science, Tsinghua University
6
+
7
+ $^{2}$ Department of Industrial Engineering, Tsinghua University
8
+
9
+ luosx18@mails.tsinghua.edu.cn
10
+
11
+ syu@tsinghua.edu.cn
12
+
13
+ # Abstract
14
+
15
+ Knowledge graph integration typically suffers from the widely existing dangling entities that cannot find alignment across knowledge graphs (KGs). The dangling entity set is unavailable in most real-world scenarios, and manually mining the entity pairs that consist of entities with the same meaning is labor-consuming. In this paper, we propose a novel, accurate Unsupervised method for joint Entity alignment (EA) and Dangling entity detection (DED), called UED. The UED mines the literal semantic information to generate pseudo entity pairs and globally guided alignment information for EA and then utilizes the EA results to assist the DED. We construct a medical cross-lingual knowledge graph dataset, MedED, providing data for both the EA and DED tasks. Extensive experiments demonstrate that in the EA task, UED achieves EA results comparable to those of state-of-the-art supervised EA baselines and outperforms the current state-of-the-art EA methods by combining supervised EA data. For the DED task, UED obtains high-quality results without supervision.
16
+
17
+ # 1 Introduction
18
+
19
+ Entity alignment (EA) that aligns the equivalent entities in different knowledge graphs (KGs) is a fundamental technique for knowledge graph integration. A typical application of EA is constructing a large-scale KG by integrating different KGs to facilitate various downstream tasks such as question answering (Savenkov and Agichtein, 2016; Yu et al., 2017; Jin et al., 2022), recommendation (Cao et al., 2019), and search engines (Xiong et al., 2017). The existing embedding-based EA methods align each entity to its closest counterpart cross KGs according to entity embeddings. In recent years, they have emerged as the dominant EA solutions due to their effectiveness and strong ability to utilize information such as entity name strings, entity description, attributes, and graph structure.
20
+
21
+ These EA methods (Chen et al., 2017; Sun et al., 2018; Wang et al., 2018; Zhu et al., 2021a; Liu et al., 2021; Lin et al., 2021) are built upon the assumption that there exists a counterpart in the target KG for any source entity (Sun et al., 2021). Therefore, ideally, their performances are assessed by only considering the entities in the set of testing entity pairs.
22
+
23
+ In the real-world scenario, four facts should be considered when aligning KGs: (1) The entities that do not have counterparts in another KG are ubiquitous. These entities are referred to as dangling entities, following Sun et al. (2021). Therefore, it is necessary to identify the dangling entities and then align the remaining matchable entities to their counterparts. The widely used approach of integrating KGs according to the cross KG similarity between entities loses sight of identifying dangling entities. (2) Dangling entity sets are not labeled in most cases, while some entity pairs are relatively available but labor-consuming to obtain. For example, we can preliminarily obtain pseudo entity pairs with high similarity according to extra information to align entities and then manually extract the correct pairs. The extra information could be cross KG links or literal semantic information from machine translation or word embeddings. However, identifying a dangling entity requires manual comparisons between an entity and all entities in the target KG, which is tedious and almost impossible for large KGs. Dangling entity detection (DED) methods need to avoid reliance on supervision. (3) Literal semantic information has an essential impact on EA. As shown in previous works (Wu et al., 2019; Nguyen et al., 2020; Zhu et al., 2021b), competitive EA results can be achieved by translating entity names to the same language and calculating the vector representation from GloVe (Pennington et al., 2014), suggesting that it is possible to get rid of manually annotated entity pairs by automatically mining literal semantic information. (4) Alignments
24
+
25
+ are associated with each other. Traditional EA methods align entities in the local alignment way by calculating the cross-KG similarity of entities and selecting the most similar entity as the EA result. The local alignment neglects the association between alignments and suffers from conflicting many-to-one and many-to-many alignments.
26
+
27
+ Considering the above facts, we propose UED, an accurate Unsupervised method for joint EA and DED. For EA, to automatically mine the literal semantic information, we generate pseudo entity pairs for the align loss and design a semantic-based globally guided loss to guide the alignment for all entities, not only for those in entity pairs. For DED, since verifying the dangling entity has to check all the entities in the target KG and the dangling entity set is unavailable, we add empty entities into two KGs and transfer the EA and DED tasks into a modified global optimal transport problem (OTP) to identify dangling entities relying on pseudo entity pairs only. We propose a simple but effective way to reduce the complexity of OTP. Our experiments show that the dangling entity identification mechanism also enhances the EA performance.
28
+
29
+ There are several traditional EA datasets widely used in the EA task. Nevertheless, neither dataset provides a dangling test set for DED. As mentioned above, identifying dangling entities is crucial in real-world knowledge graph integration. To demonstrate the effectiveness of our method and incentivize future studies, we construct a cross-lingual medical knowledge graph dataset with EA task and DED task, called MedED, based on the Unified Medical Language System (UMLS) (Lindberg et al., 1993).
30
+
31
+ We summarize the main contributions as follows:
32
+
33
+ - We construct a cross-lingual knowledge graph dataset to demonstrate the effect of our designs and support future studies on EA and DED.
34
+ - We propose UED, a unified unsupervised method for both EA and DED, which gets rid of supervision in both tasks and fits the real-world scenario when aligning KGs. UED mines the literal semantic information for EA and then utilizes the EA results on pseudo entity pairs to generate high-quality DED results and consequently facilitates the performance of EA.
35
+
36
+ - We conduct comprehensive experiments on both MedED and DBP15K. In the EA task, UED achieves comparable results with state-of-the-art supervised baselines, and the supervised version of UED outperforms the current state-of-the-art methods.
37
+
38
+ The source code of UED is publicly available at https://github.com/luosx18/UED.
39
+
40
+ # 2 Related Work
41
+
42
+ # Embedding-based Entity Alignment
43
+
44
+ Embedding-based entity alignment methods build upon knowledge embedding models, which have been developing rapidly in recent years and aim to encode KGs into low-dimensional vector space. The mainstream embedding-based EA methods adopt models such as TransE (Bordes et al., 2013), GCN (Kipf and Welling, 2016), GAT (Velicković et al., 2017), and the other variants (Sun et al., 2017; Zhu et al., 2021b), to represent entities of different KGs in vector space. Then they find equivalent entity pairs between KGs in the local alignment way.
45
+
46
+ The critical point of these EA methods is to include more semantic information in KGs accurately and effectively. The semantic information comprises graph structure, attributes, and literal information, but not all KGs contain all information mentioned above. All embedding-based EA methods adopt graph structures (Chen et al., 2017), while some methods utilize attributes (Sun et al., 2017; Trisedya et al., 2019) or literal information (Xu et al., 2019; Wu et al., 2019; Zhu et al., 2021a). To alleviate the insufficiency of training data, some studies attempt to leverage bootstrapping, iterative training techniques, and self-supervised learning to enrich the training entity pairs with pseudo pairs (Sun et al., 2018; Mao et al., 2020; Liu et al., 2021). The proposed method utilizes literal semantic information to generate alignment guidance for all entities in KGs without supervision and is compatible with all graph embedding models mentioned above.
47
+
48
+ # Global Entity Alignment
49
+
50
+ Local alignment ignores the fact that alignments are associated with each other, resulting in incorrect alignments and illegal many-to-one and many-to-many alignments (Xu et al., 2020; Zeng et al., 2020). Global EA methods that consider all alignments together have been proposed to mitigate
51
+
52
+ these issues but require relatively good quality local EA to avoid the accumulation of incorrect alignments. Unfortunately, according to the Hungarian algorithm (Kuhn, 1955), the complexity of finding the best alignment between two KGs of $n$ entities is $O(n^4)$ . The existing approximate global alignment methods, CEA (Zeng et al., 2020) and GM-EHD-JEA (Xu et al., 2020), reduce the complexity with extra constraints. The CEA requires the entity pairs to be stable matches and uses the deferred acceptance algorithm (DAA) to find the alignments. The GM-EHD-JEA decomposes the entire search space into many isolated subspaces and consequently restricts the cross-subspace alignment.
53
+
54
+ # Dangling Entity Detection
55
+
56
+ Several recent studies emphasize the problem of dangling entities in EA tasks. Zhao et al. (2020) and Zeng et al. (2021) introduce threshold-based methods to identify dangling entities according to the distance between a source entity and its closest target entity. These two methods identify dangling entities to improve EA performance. Sun et al. (2021) also study the performance of DED in the supervised setting by using the dangling training set to train a classification model or a marginal ranking model.
57
+
58
+ Our method transfers the global EA and the DED into a modified unified optimal transport problem and consequently relieves the constraints on global EA, utilizes the association between alignment, and does not rely on dangling entity labels.
59
+
60
+ # 3 UED Framework
61
+
62
+ In this section, we first briefly describe the tasks of EA and DED and then elucidate our unified unsupervised approach to solve EA along with DED. An overview of our method is depicted in Figure 1.
63
+
64
+ # 3.1 Task Definition
65
+
66
+ Formally, a KG is denoted as $\mathcal{G} = \{\mathcal{E},\mathcal{R},\mathcal{T}\}$ , where $\mathcal{E} = \mathcal{D}\cup \mathcal{A}$ is the disjoint union of dangling set $\mathcal{D}$ and matchable set $\mathcal{A}$ . $\mathcal{R}$ and $\mathcal{T}$ denote the set of relations and triples, respectively. For two KGs, $\mathcal{G}_1$ and $\mathcal{G}_2$ , the DED task aims to find $\mathcal{D}_1$ and $\mathcal{D}_2$ , while the EA task aims to find the entity pairs between the remaining set, $\mathcal{A}_1$ and $\mathcal{A}_2$ .
67
+
68
+ # 3.2 Pseudo Entity Pairs
69
+
70
+ Manually generating entity pairs to train the embedding-based EA model is labor-consuming. We automatically generate pseudo entity pairs for model
71
+
72
+ ![](images/1cfeb529065240f22feb8e302c8f3d7497b32ee92b49615ebc95a8a4d3330e4a.jpg)
73
+ Figure 1: The Framework of UED. The rounded rectangles with dashed line denote the main modules. The circles with a number are matchable entities, and the circles with slash denote dangling entities. The gray circles are the empty entities and the gray rectangles in distance matrix denotes distance between empty entity to other entities. MT and WE refer to machine translation and word embeddings.
74
+
75
+ training, relying only on machine translation and word embeddings.
76
+
77
+ In our approach, we utilize GloVe (Pennington et al., 2014) word embeddings to generate the mean word vector $v_{i}$ for entity $e_{i}$ based on the entity name. Then the initial similarity between $e_{i} \in \mathcal{G}_{1}$ and $e_{j} \in \mathcal{G}_{2}$ is defined as the cosine similarity $s_{ij} = \cos(v_{i}, v_{j})$ . The set of pseudo entity pairs consists of entity pairs with high similarity. Specifically, we define a threshold $\varepsilon < 1$ . If $s_{ij}$ satisfies:
78
+
79
+ $$
80
+ s _ {i j} > \varepsilon ,
81
+ $$
82
+
83
+ $$
84
+ s _ {i k} \leq \varepsilon , \forall k \neq j, \tag {1}
85
+ $$
86
+
87
+ $$
88
+ s _ {l j} \leq \varepsilon , \forall l \neq i,
89
+ $$
90
+
91
+ then pair $(e_i, e_j)$ is added to the pseudo entity pairs set $\mathcal{P}$ . For cross-lingual KGs, we translate entity names using machine translation before applying the word embeddings.
92
+
93
+ # 3.3 Information Aggregating
94
+
95
+ Our method is compatible with all graph embedding models. In this paper, we follow the widespread setting to use relation triples as graph structure information and entity names as literal
96
+
97
+ information (Xu et al., 2019; Wu et al., 2019; Mao et al., 2020; Nguyen et al., 2020; Zhu et al., 2021a). We use a graph embedding model to aggregate the initial embeddings and relation triples to generate enhanced entity embeddings, $X_{e}$ .
98
+
99
+ Unlike previous works (Xu et al., 2019; Mao et al., 2020; Nguyen et al., 2020; Zhu et al., 2021a), we use pseudo entity pairs to train the graph embedding model instead of training entity pairs. Denoting $X_{e_i}$ as the output embeddings of entity $e_i$ after the graph embedding model, we modify the hinge loss with the pseudo entity pairs, denoted as align loss:
100
+
101
+ $$
102
+ \begin{array}{l} \mathcal {L} _ {a} = \sum_ {(e _ {i}, e _ {j}) \in \mathcal {P}} \sum_ {(e _ {i} ^ {\prime}, e _ {j} ^ {\prime}) \in \mathcal {P} ^ {\prime} (e _ {i}, e _ {j})} \max \left(d \left(X _ {e _ {i}}, X _ {e _ {j}}\right) \right. \\ - d \left(X _ {e _ {i} ^ {\prime}}, X _ {e _ {j} ^ {\prime}}\right) + \lambda , 0), \tag {2} \\ \end{array}
103
+ $$
104
+
105
+ where $\lambda$ is the margin, $\mathcal{P}'(e_i,e_j)$ is the set of negative samples for $(e_i,e_j)$ by replacing $e_i$ or $e_j$ with their neighbors, and $d(\cdot ,\cdot)$ is the Manhattan distance following previous works (Wu et al., 2019; Zhu et al., 2021a).
106
+
107
+ # 3.4 Globally Guided Similarity and Loss
108
+
109
+ The align loss does not make full use of literal semantic information since the initial similarity $s_{ij}$ contains entity alignment information for entities not in $\mathcal{P}$ . In addition, training an EA model with the align loss may mislead the model to pay too much attention to the entities in $\mathcal{P}$ . Therefore, we regard entities in the target KG as anchors to guide the EA training for all source entities. Our assumption is that the counterpart of an entity is more likely to occur among entities whose initial embeddings are more similar. Specifically, we propose a globally guided loss:
110
+
111
+ $$
112
+ \begin{array}{l} \mathcal {L} _ {g} = \sum_ {(e _ {i}, e _ {j}) \in \mathcal {Q}} s _ {i j} \sum_ {(e _ {i} ^ {\prime}, e _ {j} ^ {\prime}) \in \mathcal {Q} ^ {\prime} (e _ {i}, e _ {j})} \max \left(d (X _ {e _ {i}}, X _ {e _ {j}}) \right. \\ - d \left(X _ {e _ {i} ^ {\prime}}, X _ {e _ {j} ^ {\prime}}\right) + \lambda , 0), \tag {3} \\ \end{array}
113
+ $$
114
+
115
+ where $\mathcal{Q}$ consists of all $(e_i, e_j)$ satisfying $e_j$ is one of the top $k$ similar entities of $e_i$ according to the initial semantic similarity $\{s_{ij}, \forall j\}$ , and $k$ is a hyperparameter. The construction of $\mathcal{Q}'$ is similar to $\mathcal{P}'$ . According to our experiments, $s_{ij}$ is a necessary value that refers to the weight of $(e_i, e_j)$ in $\mathcal{L}_g$ to improve model performance. To gradually reduce the impact of entities in $\mathcal{Q}$ , we design a
116
+
117
+ mechanism to decrease the weight of the globally guided loss. The final loss is
118
+
119
+ $$
120
+ \mathcal {L} = \mathcal {L} _ {a} + w (t) \mathcal {L} _ {g}, \tag {4}
121
+ $$
122
+
123
+ where $t$ is the training step, and $w(t)$ decreases linearly to 0 as $t$ increases.
124
+
125
+ # 3.5 Global EA and DED
126
+
127
+ Given two KGs comprising $n$ and $m$ entities, we define a distance matrix $C \in \mathbb{R}^{n \times m}$ with each entry indicating the Manhattan distance between two entities. The global EA task can be formulated into an optimal transport problem (OTP) to find an optimal global alignment by minimizing the total transport distance:
128
+
129
+ $$
130
+ \begin{array}{l} \min \sum_ {i = 1, j = 1} ^ {n, m} C _ {i j} \Psi_ {i j}, \\ \text {s . t .} \sum_ {j} \Psi_ {i j} = 1, 1 \leq i \leq n, \tag {5} \\ \sum_ {i} \Psi_ {i j} = 1, 1 \leq j \leq m, \\ \end{array}
131
+ $$
132
+
133
+ where $\Psi$ is the transport matrix, and $\Psi_{ij} \in \{0,1\}$ for all $i$ and $j$ indicates whether entity $e_i$ in $\mathcal{G}_1$ aligns to $e_j$ in $\mathcal{G}_2$ . The constraints guarantee the one-to-one alignment. Considering that $n \neq m$ in most cases and the existence of dangling entities, this OTP is invalid. To address these issues, we add an empty entity into $\mathcal{G}_1$ and $\mathcal{G}_2$ separately. Without loss of generality, we prepend the empty entity as the first entity in both KGs. Since we have no information for empty entities, we define hyperparameters, $\alpha$ and $\beta$ , to describe the cross KG distance between the empty entity and other entities. Therefore, the OTP is now as follows:
134
+
135
+ $$
136
+ \begin{array}{l} \min \sum_ {i = 1, j = 1} ^ {n + 1, m + 1} C _ {i j} \Psi_ {i j}, \\ s. t. \sum_ {j} \Psi_ {i j} = 1, 2 \leq i \leq n + 1, \tag {6} \\ \sum_ {i} \Psi_ {i j} = 1, 2 \leq j \leq m + 1, \\ \end{array}
137
+ $$
138
+
139
+ where $C_{1,j} = \alpha, \forall j$ and $C_{i,1} = \beta, \forall i$ denote the first row and the first column of the distance matrix, respectively. $\Psi_{ij} \in \{0,1\}$ , and $\Psi_{i,1} = 1$ indicates that entity $e_i$ is dangling, while $\Psi_{1,j} = 1$ also indicates dangling entity $e_j$ . The other $\Psi_{i,j} = 1$ predicts the entity pair $(e_i, e_j)$ .
140
+
141
+ <table><tr><td colspan="2">Datasets</td><td>#Ent.</td><td>#Rel.</td><td>#Trip.</td><td>#Pairs</td><td>#Dang.</td></tr><tr><td rowspan="2">MedED</td><td>FR</td><td>19,382</td><td>431</td><td>455,368</td><td rowspan="2">6,365</td><td>13,017</td></tr><tr><td>EN</td><td>18,632</td><td>622</td><td>841,792</td><td>12,267</td></tr><tr><td rowspan="2">MedED</td><td>ES</td><td>19,228</td><td>546</td><td>594,130</td><td rowspan="2">11,153</td><td>8,075</td></tr><tr><td>EN</td><td>18,632</td><td>622</td><td>841,792</td><td>7,479</td></tr><tr><td rowspan="2">DBP15K</td><td>ZH</td><td>19,388</td><td>1,700</td><td>70,414</td><td rowspan="2">15,000</td><td>-</td></tr><tr><td>EN</td><td>19,572</td><td>1,322</td><td>95,142</td><td>-</td></tr><tr><td rowspan="2">DBP15K</td><td>JA</td><td>19,814</td><td>1,298</td><td>77,214</td><td rowspan="2">15,000</td><td>-</td></tr><tr><td>EN</td><td>19,780</td><td>1,152</td><td>93,484</td><td>-</td></tr><tr><td rowspan="2">DBP15K</td><td>FR</td><td>19,661</td><td>902</td><td>105,998</td><td rowspan="2">15,000</td><td>-</td></tr><tr><td>EN</td><td>19,993</td><td>1,207</td><td>115,722</td><td>-</td></tr></table>
142
+
143
+ Table 1: Statistics of MedED and DBP15K.
144
+
145
+ Our approach now merges the EA and the DED into one OTP. This OTP considers the global alignment information and the interactions among alignments and dangling entity identification. Moreover, considering that similar entities contain more information for both EA and DED, we keep the top $K$ rank similarity entities in the other KG for each entity and drop the remaining entities to reduce the complexity of the OTP. Therefore, we solve the problem with very sparse matrices, $C$ and $\Psi$ . Section 5.3 will show that the method is powerful with acceptable computational complexity after reduction. The last problem is to find the proper $\alpha$ and $\beta$ for both EA and DED. Since we have the pseudo entity pairs set $\mathcal{P}$ in real-world data, we propose an ingenious way to grid search the quantiles of row minimums and column minimums of $C$ synchronously and then select $\alpha^{*}$ and $\beta^{*}$ that achieve the best EA performance on $\mathcal{P}$ . Finally, the entities aligned to the empty entity under given $\alpha^{*}$ and $\beta^{*}$ are dangling entities. The other alignments are the global EA results.
146
+
147
+ # 4 Experimental Setup
148
+
149
+ # 4.1 Datasets and Evaluation
150
+
151
+ Sun et al. (2021) construct a dataset providing both the EA task and the DED task, but it contains only relation-triple information, so the quality of local EA is limited, making it incompatible with global alignment methods. In this work, we construct a dataset with graph structure and literal semantic information that provides both EA and DED tasks.
152
+
153
+ # Dataset Construction
154
+
155
+ The Unified Medical Language System (UMLS) (Lindberg et al., 1993) is a large-scale resource containing over 4 million unique medical concepts and over 87 million relation triples. Concepts in UMLS
156
+
157
+ have several terms in different languages. We extract concepts that contain terms in the selected language as entities to construct a new monolingual KG and retain the relations between entities. For the entity names, we select the preferred terms in UMLS. The criterion of entity pairs is whether entities belong to the same concept. Similarly, an entity is dangling if its original concept is not in the other KG. We extract the KGs of English, French, and Spanish and then construct the KG pairs of FR-EN (French to English) and ES-EN (Spanish to English). We select 20 thousand entities with the most relation triples in UMLS for the specified language and then drop the entities unrelated to other selected entities. Table 1 shows the statistics of the new dataset, MedED. For both EA and DED, we split $70\%$ of entity pairs and dangling entities as the test set. Even though our method does not rely on the training set, we keep the remaining $30\%$ as the training set for further model comparison and ablation study.
158
+
159
+ # DBP15K
160
+
161
+ We conduct experiments on the widely used existing EA benchmark, DBP15K (Sun et al., 2017). Three pairs of cross-lingual KGs, ZH-EN (Chinese to English), JA-EN (Japanese to English), and FR-EN (French to English), were built into this dataset. Each KG contains approximately 20 thousand entities, and every KG pair contains 15 thousand entity pairs (Table 1). Following the setting in previous works (Sun et al., 2017; Wu et al., 2019; Zhu et al., 2021a), we keep $70\%$ of entity pairs for testing and $30\%$ for training.
162
+
163
+ # Evaluation
164
+
165
+ We compute two evaluation metrics following previous works for the EA task, Hits@k and mean reciprocal rank (MRR). Hits@k indicates the percentage of the targets that have been correctly ranked in the top $k$ . MRR is the average of the reciprocal of the rank results. The previous EA works compute Hits@k and MRR in a relaxed setting in which only the entities in testing pairs are taken into account, assuming that any source entity has a counterpart in the target KG. In addition to the relaxed evaluation, we also compute Hits@k and MRR in a practical setting in which for every testing entity, the list of candidate counterparts consists of all entities in the other KG. Global alignment methods generate one-to-one entity pairs, and we evaluate Hits@1 for these methods.
166
+
167
+ For the DED task, we compute precision, recall, and F1-score for identifying dangling entities.
168
+
169
+ # 4.2 Compared Methods
170
+
171
+ For the EA task, we compare our approach with previous methods we introduced in Section 2: (1) Init-Emb, the initial embeddings used in UED and main comparison models; (2) the methods based on translational KG embeddings model: MTransE (Chen et al., 2017), JAPE (Sun et al., 2017), and BootEA (Sun et al., 2018); (3) the methods based on graph neural networks: RDGCN (Wu et al., 2019), CEA (Zeng et al., 2020), RNM (Zhu et al., 2021b), RAGA (Zhu et al., 2021a), SelfKG (Liu et al., 2021), EchoEA (Lin et al., 2021).
172
+
173
+ The proposed method is compatible with supervised training entity pairs, so we provide both unsupervised and supervised versions of our method: (1) the unsupervised method, UED, described in Section 3. (2) the supervised version of UED, which combines the training entity pairs and the pseudo entity pairs for the align loss, denoted as UED*.
174
+
175
+ # 4.3 Implementation Details
176
+
177
+ Following Wu et al. (2019), we translate entity names in MedED to English via Google Translate and then use mean of word vector from GloVe (Pennington et al., 2014) to construct the initial entity embeddings. For entities in DBP15K, we inherit the initial embeddings used in previous works (Wu et al., 2019; Zeng et al., 2021; Zhu et al., 2021a,b; Lin et al., 2021). The threshold for pseudo entity pairs $\varepsilon$ is 0.99, and the $k = 3$ in globally guided similarity and loss. The initial value of $w(t)$ is 0.3 and $w(t)$ decreases linearly to 0 at 1/4 of the total training steps. We adopt RAGA (Zhu et al., 2021a) as the embedding-based EA model in Section 3.3 to generate enhanced entity embeddings and use the default setting of hyperparameters in RAGA. For $\alpha^{*}$ and $\beta^{*}$ in the global EA and DED, the default value of $K$ is 100 for our method. We grid search 100 paired quantiles of the row minimums and column minimums of $C$ with $K = 10$ . Then, $\alpha^{*}$ and $\beta^{*}$ are used in the other values of $K$ .
178
+
179
+ # 5 Results
180
+
181
+ # 5.1 Entity Alignment Results
182
+
183
+ Table 2 shows the results of EA on DBP15K and MedED. Following the previous work, we adopt the
184
+
185
+ relaxed evaluation setting. The results with practical evaluation setting are listed in Appendix A.1.
186
+
187
+ In general, for both local and global alignment in DBP15K, the UED achieves comparable results with the previous state-of-the-art baselines. More specifically, for local alignment, the UED achieves the same level of performance as the supervised embedding-based EA method, the RAGA, whose graph embedding model we adopt. For global alignment, the OTP brings UED a significant improvement, and the UED outperforms all competing methods except the new supervised state-of-the-art method, EchoEA. The Hits@1 of UED for ZH-EN, JA-EN, and FR-EN achieves 0.877, 0.915, and 0.975 in DBP15K, respectively. In addition, UED* outperforms all methods and achieves 0.915 and 0.941 Hits@1 for ZH-EN and JA-EN in DBP15K and 0.974 and 0.979 for FR-EN and ES-EN in MedED.
188
+
189
+ # 5.2 Entity Alignment and Dangling Entity Detection Results
190
+
191
+ Table 3 shows the results of EA and DED on MedED. Note that global alignment with DED should consider all entities. We select the practical setting in the EA evaluation.
192
+
193
+ As shown in Table 3, for the EA task, by maximizing the performance of EA on pseudo entity pairs, UED achieves better results compared to the supervised RAGA and the variants of our method with DAA. In addition, the UED ( $K = 100$ ) achieves 0.805 and 0.877 Hits@1 for FR-EN and ES-EN, respectively. The supervised UED* gains a further improvement of 0.021 and 0.012 Hits@1 for FR-EN and ES-EN, respectively. For the DED task, the proposed method focuses more on the precision in recognizing dangling entities. The results of UED and UED* are also much better than those of the Distance baseline. The Distance denotes the baseline of searching for the best threshold on the dangling training set for identifying dangling entities according to the smallest distance to entities in another KG. These results imply that UED successfully uses unsupervised EA to assist DED while DED with high precision reduces the scope of EA and enhances the performance of EA. Furthermore, the results with different $K$ show that we do not need a very large value of $K$ , and there is a tradeoff between improving EA results and DED results: the larger $K$
194
+
195
+ <table><tr><td rowspan="3"></td><td colspan="9">DBP15K</td><td colspan="6">MedED</td></tr><tr><td colspan="2">ZH-EN</td><td colspan="2">JA-EN</td><td colspan="2">FR-EN</td><td colspan="2">FR-EN</td><td colspan="2">ES-EN</td><td colspan="2">ES-EN</td><td colspan="2">MRR</td><td></td></tr><tr><td>H@1</td><td>H@10</td><td>MRR</td><td>H@1</td><td>H@10</td><td>MRR</td><td>H@1</td><td>H@10</td><td>MRR</td><td>H@1</td><td>H@10</td><td>MRR</td><td>H@1</td><td>H@10</td><td>MRR</td></tr><tr><td colspan="15">Local</td><td></td></tr><tr><td>Init-Emb</td><td>.575</td><td>.689</td><td>.615</td><td>.650</td><td>.754</td><td>.688</td><td>.818</td><td>.888</td><td>.843</td><td>.716</td><td>.845</td><td>.764</td><td>.685</td><td>.826</td><td>.737</td></tr><tr><td>MTransE</td><td>.308</td><td>.614</td><td>.364</td><td>.279</td><td>.575</td><td>.349</td><td>.244</td><td>.556</td><td>.335</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>JAPE</td><td>.731</td><td>.904</td><td>-</td><td>.828</td><td>.947</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>BootEA</td><td>.629</td><td>.848</td><td>.703</td><td>.622</td><td>.854</td><td>.701</td><td>.653</td><td>.874</td><td>.731</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>RDGCN</td><td>.708</td><td>.846</td><td>-</td><td>.767</td><td>.895</td><td>-</td><td>.886</td><td>.957</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>RNM</td><td>.840</td><td>.919</td><td>.870</td><td>.872</td><td>.944</td><td>.899</td><td>.938</td><td>.981</td><td>.954</td><td></td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>RAGA</td><td>.798</td><td>.930</td><td>.847</td><td>.831</td><td>.950</td><td>.875</td><td>.914</td><td>.983</td><td>.940</td><td>.896</td><td>.981</td><td>.930</td><td>.914</td><td>.986</td><td>.943</td></tr><tr><td>SelfKG</td><td>.829</td><td>.919</td><td>-</td><td
>.890</td><td>.953</td><td>-</td><td>.959</td><td>.992</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>EchoEA</td><td>.823</td><td>.939</td><td>.865</td><td>.861</td><td>.957</td><td>.897</td><td>.939</td><td>.989</td><td>.958</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>UED</td><td>.779</td><td>.907</td><td>.826</td><td>.820</td><td>.933</td><td>.862</td><td>.921</td><td>.979</td><td>.943</td><td>.895</td><td>.975</td><td>.926</td><td>.893</td><td>.978</td><td>.925</td></tr><tr><td>UED*</td><td>.826</td><td>.943</td><td>.870</td><td>.863</td><td>.960</td><td>.900</td><td>.938</td><td>.987</td><td>.957</td><td>.901</td><td>.981</td><td>.932</td><td>.913</td><td>.987</td><td>.942</td></tr><tr><td colspan="15">Global</td><td></td></tr><tr><td>GM-EHD-JEA</td><td>.736</td><td></td><td></td><td>.792</td><td></td><td></td><td>.924</td><td></td><td></td><td>-</td><td></td><td></td><td>-</td><td></td><td></td></tr><tr><td>CEA</td><td>.787</td><td></td><td></td><td>.863</td><td></td><td></td><td>.972</td><td></td><td></td><td>-</td><td></td><td></td><td>-</td><td></td><td></td></tr><tr><td>RAGA</td><td>.873</td><td></td><td></td><td>.909</td><td></td><td></td><td>.966</td><td></td><td></td><td>.962</td><td></td><td></td><td>.970</td><td></td><td></td></tr><tr><td>EchoEA</td><td>.891</td><td></td><td></td><td>.932</td><td></td><td></td><td>.989</td><td></td><td></td><td>-</td><td></td><td></td><td>-</td><td></td><td></td></tr><tr><td>UED</td><td>.877</td><td></td><td></td><td>.915</td><td></td><td></td><td>.975</td><td></td><td></td><td>.970</td><td></td><td></td><td>.976</td><td></td><td></td></tr><tr><td>UED*</td><td>.915</td><td></td><td></td><td>.941</td><td></td><td></td><td>.984</td><td></td><td></td><td>.974</td><td></td><td></td><td>.979</td><td></td><td></td></tr></table>
196
+
197
+ Table 2: EA results on DBP15K and MedED datasets (relaxed setting). H@1 and H@10 denotes the Hits@1 and Hits@10. The underlined models use the same initial entity embeddings. The results of the compared method in DBP15K are from their original papers. We apply the RAGA in MedED for comparison. The CEA, RAGA and EchoEA use the DAA for global alignment.
198
+
199
+ <table><tr><td></td><td colspan="4">FR-EN</td><td colspan="4">ES-EN</td></tr><tr><td></td><td>EA H@1</td><td>P</td><td>DED R</td><td>F</td><td>EA H@1</td><td>P</td><td>DED R</td><td>F</td></tr><tr><td>RAGA</td><td>.787</td><td>-</td><td>-</td><td>-</td><td>.827</td><td>-</td><td>-</td><td>-</td></tr><tr><td>UED(DAA)</td><td>.774</td><td>-</td><td>-</td><td>-</td><td>.870</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Distance</td><td>-</td><td>.781</td><td>.734</td><td>.757</td><td>-</td><td>.786</td><td>.861</td><td>.822</td></tr><tr><td>UED</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>K=1</td><td>.798</td><td>.961</td><td>.794</td><td>.869</td><td>.860</td><td>.904</td><td>.842</td><td>.872</td></tr><tr><td>K=10</td><td>.803</td><td>.963</td><td>.753</td><td>.845</td><td>.874</td><td>.935</td><td>.684</td><td>.790</td></tr><tr><td>K=100</td><td>.805</td><td>.964</td><td>.748</td><td>.842</td><td>.877</td><td>.933</td><td>.646</td><td>.764</td></tr><tr><td>UED*</td><td>.826</td><td>.976</td><td>.654</td><td>.783</td><td>.901</td><td>.941</td><td>.694</td><td>.799</td></tr></table>
200
+
201
+ Table 3: EA and DED results on MedED (practical setting). H@1, P, R, and F denotes Hits@1, precision, recall, and F-score. $K = 1,10,100$ refers to the proposed global alignment method that keeps the top $K(= 1,10,100)$ rank similarity entities for each entity. The UED(DAA) and RAGA use the DAA for global alignment.
202
+
203
+ achieves the better F1-score in the DED task.
204
+
205
+ # 5.3 Empirical Runtime Analysis
206
+
207
+ The time complexity of the proposed global method is acceptable. The solving process of the OTP could be finished in less than 7, 60, and 5,00 seconds for $K = 1, 10, 100$ in MedED. Without the simplification, the running time will be more than 120,000 seconds. Considering the time consuming and the
208
+
209
+ similar performance of $K = 10$ and $K = 100$ (Table 3), much larger value of $K$ may not bring significant improvement and $K = 100$ is enough for the proposed method.
210
+
211
+ # 6 Ablation Study
212
+
213
+ To quantify the role of our designs, we provide the variants by removing the weight decreasing mechanism of the globally guided loss $\mathcal{L}_g$ and the $\mathcal{L}_g$ from UED (Table 4). In addition, we attempt to replace the proposed OTP with DAA (Table 4). For local alignment, the UED without $\mathcal{L}_g$ is the same as RAGA except for the training entity pairs. Table 5 provides other necessary results and variants in practical setting. There are five major observations:
214
+
215
+ 1. The performance of our method with pseudo entity pairs is similar to those with true entity pairs. For example, in Table 4, for local alignment results of FR-EN in DBP15K, the UED without $\mathcal{L}_g$ uses 10,689 pseudo entity pairs and gains 0.913 Hits@1, while the RAGA uses 4500 true entity pairs and gains 0.914 Hits@1. Although the proportion of how many pseudo entity pairs can play an equal role as true entity pairs changes, depending on the quality of the initial entity embedding and the KGs (Figure 2), it is valid to obtain pseudo entity pairs when true entity pairs are unavailable.
216
+
217
+ <table><tr><td></td><td colspan="3">DBP15K</td><td colspan="2">MedED</td></tr><tr><td></td><td>ZH</td><td>JA</td><td>FR</td><td>FR</td><td>ES</td></tr><tr><td>Local</td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>RAGA</td><td>.798</td><td>.831</td><td>.914</td><td>.896</td><td>.914</td></tr><tr><td>UED</td><td>.779</td><td>.820</td><td>.929</td><td>.895</td><td>.893</td></tr><tr><td>w/o Lg</td><td>.759</td><td>.794</td><td>.913</td><td>.891</td><td>.896</td></tr><tr><td>Global</td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>UED</td><td>.877</td><td>.915</td><td>.975</td><td>.970</td><td>.976</td></tr><tr><td>w/o dec.</td><td>.873</td><td>.910</td><td>.973</td><td>.969</td><td>.973</td></tr><tr><td>w/o Lg</td><td>.875</td><td>.910</td><td>.973</td><td>.971</td><td>.975</td></tr><tr><td>w/o OTP</td><td>.779</td><td>.820</td><td>.921</td><td>.895</td><td>.893</td></tr><tr><td>UED(DAA)</td><td>.847</td><td>.891</td><td>.962</td><td>.955</td><td>.956</td></tr></table>
218
+
219
+ Table 4: Hits@1 results of method variants (relaxed setting) in the EA task. The dec. is the weight decreasing mechanism of the globally guided loss, $\mathcal{L}_g$ . ZH, JA, FR and ES denotes the KG pairs ZH-EN, JA-EN, FR-EN and ES-EN.
220
+
221
+ <table><tr><td></td><td colspan="2">FR-EN</td><td colspan="2">ES-EN</td></tr><tr><td></td><td>EA</td><td>DED</td><td>EA</td><td>DED</td></tr><tr><td>UED</td><td>.803</td><td>.845</td><td>.874</td><td>.790</td></tr><tr><td>w/o empty</td><td>.555</td><td>-</td><td>.652</td><td>-</td></tr><tr><td>w. gold α, β</td><td>.809</td><td>.803</td><td>.874</td><td>.790</td></tr><tr><td>UED(CODER)</td><td>.884</td><td>.863</td><td>.933</td><td>.865</td></tr></table>
222
+
223
+ Table 5: Results of method variants (practical setting) in MedED. We report Hits@1 and F-score for EA and DED. The w/o empty denotes the OTP without the empty entities. The w. gold $\alpha, \beta$ denote that the $\alpha$ and $\beta$ in the OTP are selected by the dangling training set. UED(CODER) refers to the method that we replace the Glove with a medical language model in UED.
224
+
225
+ 2. The proposed global alignment method is stable and effective, causing significant improvements $(0.046\sim 0.098\ \mathrm{Hits}@1)$ compared with the UED for local alignment (Table 4).
226
+ 3. The globally guided similarity and loss and the weight decreasing mechanism are usually helpful (Table 4).
227
+ 4. Introducing the empty entity is necessary. The global method without empty entities harms the EA result and cannot be applied to the DED task (Table 5).
228
+ 5. The proposed method for searching proper $\alpha^{*}$ and $\beta^{*}$ produces successful results. The results with $\alpha^{*}$ and $\beta^{*}$ achieve the same level of performance for EA and DED compared to the gold selection for $\alpha$ and $\beta$ based on the EA training entity pairs.
229
+
230
+ Besides, we attempt to replace the GloVe in MedED with a pretrained medical language model (LM), the English version of CODER (Yuan et al., 2022), and show that a proper domain-specific LM
231
+
232
+ ![](images/568a9d0d9356fadc83627bca5d7d500b95d5b077237bfd5abc6ddb4c82dabe80.jpg)
233
+ Figure 2: The Hits@1 in DBP15K (practical setting) for the UED without $\mathcal{L}_g$ and OTP. The solid line and dashed line denote the method trained with the training entity pairs and the pseudo entity pairs, respectively.
234
+
235
+ trained on a large KG may achieve better results (Table 5).
236
+
237
+ # 7 Conclusion
238
+
239
+ This paper proposes a novel unified unsupervised method for both EA and DED, which better fits the realistic scenario for integrating KGs. UED contains four modules: pseudo entity pair generation, information aggregation, globally guided similarity and loss, and a modified OTP for global EA and DED. The first three modules mine the information in KGs to get rid of supervised entity pairs, while the last module integrates EA and DED into a unified framework to identify dangling entities without supervision and provide better EA results. We also construct a new dataset for the EA and DED tasks and perform experiments to demonstrate the effectiveness of UED.
240
+
241
+ # Acknowledgements
242
+
243
+ We would like to express our gratitude to the reviewers for their helpful comments and suggestions. We thank Zheng Yuan, Hongyi Yuan, Pengyu Cheng, and Huaiyuan Ying for their help.
244
+
245
+ # References
246
+
247
+ Antoine Bordes, Nicolas Usunier, Alberto Garcia-Duran, Jason Weston, and Oksana Yakhnenko. 2013. Translating embeddings for modeling multi-relational data. Advances in neural information processing systems, 26.
248
+ Yixin Cao, Xiang Wang, Xiangnan He, Zikun Hu, and Tat-Seng Chua. 2019. Unifying knowledge graph
249
+
250
+ learning and recommendation: Towards a better understanding of user preferences. In The world wide web conference, pages 151-161.
251
+ Muhao Chen, Yingtao Tian, Mohan Yang, and Carlo Zaniolo. 2017. Multilingual knowledge graph embeddings for cross-lingual knowledge alignment. In Proceedings of the 26th International Joint Conference on Artificial Intelligence, pages 1511-1517.
252
+ Qiao Jin, Zheng Yuan, Guangzhi Xiong, Qianlan Yu, Huaiyuan Ying, Chuanqi Tan, Mosha Chen, Songfang Huang, Xiaozhong Liu, and Sheng Yu. 2022. Biomedical question answering: A survey of approaches and challenges. ACM Computing Surveys (CSUR), 55(2):1-36.
253
+ Thomas N Kipf and Max Welling. 2016. Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907.
254
+ Harold W Kuhn. 1955. The hungarian method for the assignment problem. Naval research logistics quarterly, 2(1-2):83-97.
255
+ Xueyuan Lin, Wenyu Song, Haoran Luo, et al. 2021. Echoea: Echo information between entities and relations for entity alignment. arXiv preprint arXiv:2107.03054.
256
+ Donald AB Lindberg, Betsy L Humphreys, and Alexa T McCray. 1993. The unified medical language system. Yearbook of Medical Informatics, 2(01):41-51.
257
+ Xiao Liu, Haoyun Hong, Xinghao Wang, Zeyi Chen, Evgeny Kharlamov, Yuxiao Dong, and Jie Tang. 2021. A self-supervised method for entity alignment. arXiv preprint arXiv:2106.09395.
258
+ Xin Mao, Wenting Wang, Huimin Xu, Yuanbin Wu, and Man Lan. 2020. Relational reflection entity alignment. In Proceedings of the 29th ACM International Conference on Information & Knowledge Management, pages 1095-1104.
259
+ Tam Thanh Nguyen, Thanh Trung Huynh, Hongzhi Yin, Vinh Van Tong, Darnbi Sakong, Bolong Zheng, and Quoc Viet Hung Nguyen. 2020. Entity alignment for knowledge graphs with multi-order convolutional networks. IEEE Transactions on Knowledge and Data Engineering.
260
+ Jeffrey Pennington, Richard Socher, and Christopher D Manning. 2014. Glove: Global vectors for word representation. In Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP), pages 1532-1543.
261
+ Denis Savenkov and Eugene Agichtein. 2016. Crqa: Crowd-powered real-time automatic question answering system. In Fourth AAAI conference on human computation and crowdsourcing.
262
+ Zequn Sun, Muhao Chen, and Wei Hu. 2021. Knowing the no-match: Entity alignment with dangling cases. In Proceedings of the 59th Annual Meeting of the
263
+
264
+ Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 3582-3593, Online. Association for Computational Linguistics.
265
+ Zequn Sun, Wei Hu, and Chengkai Li. 2017. Crosslingual entity alignment via joint attribute-preserving embedding. In International Semantic Web Conference, pages 628-644. Springer.
266
+ Zequn Sun, Wei Hu, Qingheng Zhang, and Yuzhong Qu. 2018. Bootstrapping entity alignment with knowledge graph embedding. In Proceedings of the 27th International Joint Conference on Artificial Intelligence, pages 4396-4402.
267
+ Bayu Distiawan Trisedya, Jianzhong Qi, and Rui Zhang. 2019. Entity alignment between knowledge graphs using attribute embeddings. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 297-304.
268
+ Petar Velicković, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Lio, and Yoshua Bengio. 2017. Graph attention networks. arXiv preprint arXiv:1710.10903.
269
+ Zhichun Wang, Qingsong Lv, Xiaohan Lan, and Yu Zhang. 2018. Cross-lingual knowledge graph alignment via graph convolutional networks. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 349-357.
270
+ Yuting Wu, Xiao Liu, Yansong Feng, Zheng Wang, Rui Yan, and Dongyang Zhao. 2019. Relation-aware entity alignment for heterogeneous knowledge graphs. In Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence. International Joint Conferences on Artificial Intelligence.
271
+ Chenyan Xiong, Russell Power, and Jamie Callan. 2017. Explicit semantic ranking for academic search via knowledge graph embedding. In Proceedings of the 26th international conference on world wide web, pages 1271-1279.
272
+ Kun Xu, Linfeng Song, Yansong Feng, Yan Song, and Dong Yu. 2020. Coordinated reasoning for cross-lingual knowledge graph alignment. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 9354–9361.
273
+ Kun Xu, Liwei Wang, Mo Yu, Yansong Feng, Yan Song, Zhiguo Wang, and Dong Yu. 2019. Cross-lingual knowledge graph alignment via graph matching neural network. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3156-3161, Florence, Italy. Association for Computational Linguistics.
274
+ Mo Yu, Wenpeng Yin, Kazi Saidul Hasan, Cicero dos Santos, Bing Xiang, and Bowen Zhou. 2017. Improved neural relation detection for knowledge base question answering. In Proceedings of the 55th Annual Meeting of the Association for Computational
275
+
276
+ Linguistics (Volume 1: Long Papers), pages 571-581, Vancouver, Canada. Association for Computational Linguistics.
277
+
278
+ Zheng Yuan, Zhengyun Zhao, Haixia Sun, Jiao Li, Fei Wang, and Sheng Yu. 2022. Coder: Knowledgeinfused cross-lingual medical term embedding for term normalization. Journal of biomedical informatics, page 103983.
279
+
280
+ Weixin Zeng, Xiang Zhao, Jiuyang Tang, Xinyi Li, Minnan Luo, and Qinghua Zheng. 2021. Towards entity alignment in the open world: an unsupervised approach. In International Conference on Database Systems for Advanced Applications, pages 272-289. Springer.
281
+
282
+ Weixin Zeng, Xiang Zhao, Jiuyang Tang, and Xuemin Lin. 2020. Collective entity alignment via adaptive features. In 2020 IEEE 36th International Conference on Data Engineering (ICDE), pages 1870-1873. IEEE.
283
+
284
+ Xiang Zhao, Weixin Zeng, Jiuyang Tang, Wei Wang, and Fabian Suchanek. 2020. An experimental study of state-of-the-art entity alignment approaches. IEEE Transactions on Knowledge & Data Engineering, (01):1-1.
285
+
286
+ Renbo Zhu, Meng Ma, and Ping Wang. 2021a. Raga: Relation-aware graph attention networks for global entity alignment. In PAKDD (1), pages 501-513. Springer.
287
+
288
+ Yao Zhu, Hongzhi Liu, Zhonghai Wu, and Yingpeng Du. 2021b. Relation-aware neighborhood matching model for entity alignment. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 4749-4756.
289
+
290
+ # A Appendix
291
+
292
+ # A.1 Practical Evaluation Results
293
+
294
+ For completeness, this appendix reports the EA results on DBP15K in the practical evaluation setting (Table 6). We compare our method with RAGA, since we adopt the graph embedding part of RAGA in our framework.
295
+
296
+ <table><tr><td></td><td>ZH-EN @1</td><td>JA-EN @10 MRR</td><td>FR-EN @10 MRR</td><td>@10 MRR</td></tr><tr><td colspan="5">local</td></tr><tr><td>Init-Emb</td><td>.570</td><td>.686</td><td>.611</td><td>.633</td></tr><tr><td>RAGA</td><td>.725</td><td>.903</td><td>.790</td><td>.773</td></tr><tr><td>UED</td><td>.751</td><td>.892</td><td>.802</td><td>.793</td></tr><tr><td colspan="5">global</td></tr><tr><td>RAGA</td><td>.834</td><td></td><td>.742</td><td></td></tr><tr><td>UED(DAA)</td><td>.799</td><td></td><td>.769</td><td></td></tr><tr><td>UED</td><td>.847</td><td></td><td>.890</td><td></td></tr></table>
297
+
298
+ Table 6: EA results on DBP15K (practical setting). @1 and @10 denote the Hits@1 and Hits@10. The UED(DAA) refers to the variant of UED obtained by replacing the OTP with DAA.
anaccurateunsupervisedmethodforjointentityalignmentanddanglingentitydetection/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3f22b6189bf29c85ee4d05aad56d7abe788de8f98260e518e804b6c923f52e57
3
+ size 401218
anaccurateunsupervisedmethodforjointentityalignmentanddanglingentitydetection/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8a28e5a15f0f32efcfe826304c9b182ba2b2ecd9e49114893d8936316fc49cc4
3
+ size 375623
analyzingdynamicadversarialtrainingdatainthelimit/b9bb3051-d3ff-4ead-a235-514613ae4dbc_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7cb71480a885a4bb0f0f778ea949af1425c2f8eccd419932cb281687195aa49c
3
+ size 97194
analyzingdynamicadversarialtrainingdatainthelimit/b9bb3051-d3ff-4ead-a235-514613ae4dbc_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f8758603b0dfd4b849e9a2ac59ca0348388cf04c67738955c1c6602d007e3206
3
+ size 120477