Add Batch 371ef547-f3da-4e9f-a9ad-6136075aed26
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- abductivecommonsensereasoningexploitingmutuallyexclusiveexplanations/15aa9f0b-ccfc-4486-96ee-4b189be9ac2b_content_list.json +3 -0
- abductivecommonsensereasoningexploitingmutuallyexclusiveexplanations/15aa9f0b-ccfc-4486-96ee-4b189be9ac2b_model.json +3 -0
- abductivecommonsensereasoningexploitingmutuallyexclusiveexplanations/15aa9f0b-ccfc-4486-96ee-4b189be9ac2b_origin.pdf +3 -0
- abductivecommonsensereasoningexploitingmutuallyexclusiveexplanations/full.md +361 -0
- abductivecommonsensereasoningexploitingmutuallyexclusiveexplanations/images.zip +3 -0
- abductivecommonsensereasoningexploitingmutuallyexclusiveexplanations/layout.json +3 -0
- acceleratingtransformerinferencefortranslationviaparalleldecoding/4090c341-5ebb-47f1-b801-0ff756c1c814_content_list.json +3 -0
- acceleratingtransformerinferencefortranslationviaparalleldecoding/4090c341-5ebb-47f1-b801-0ff756c1c814_model.json +3 -0
- acceleratingtransformerinferencefortranslationviaparalleldecoding/4090c341-5ebb-47f1-b801-0ff756c1c814_origin.pdf +3 -0
- acceleratingtransformerinferencefortranslationviaparalleldecoding/full.md +498 -0
- acceleratingtransformerinferencefortranslationviaparalleldecoding/images.zip +3 -0
- acceleratingtransformerinferencefortranslationviaparalleldecoding/layout.json +3 -0
- accentanautomaticeventcommonsenseevaluationmetricforopendomaindialoguesystems/92f3e19f-06e9-454d-a2b2-83f83888e1e7_content_list.json +3 -0
- accentanautomaticeventcommonsenseevaluationmetricforopendomaindialoguesystems/92f3e19f-06e9-454d-a2b2-83f83888e1e7_model.json +3 -0
- accentanautomaticeventcommonsenseevaluationmetricforopendomaindialoguesystems/92f3e19f-06e9-454d-a2b2-83f83888e1e7_origin.pdf +3 -0
- accentanautomaticeventcommonsenseevaluationmetricforopendomaindialoguesystems/full.md +763 -0
- accentanautomaticeventcommonsenseevaluationmetricforopendomaindialoguesystems/images.zip +3 -0
- accentanautomaticeventcommonsenseevaluationmetricforopendomaindialoguesystems/layout.json +3 -0
- aclmaselectivedenoisingbasedgenerativedataaugmentationapproachforlowresourcecomplexner/0b69e674-6367-45fa-a4f3-60843206c20e_content_list.json +3 -0
- aclmaselectivedenoisingbasedgenerativedataaugmentationapproachforlowresourcecomplexner/0b69e674-6367-45fa-a4f3-60843206c20e_model.json +3 -0
- aclmaselectivedenoisingbasedgenerativedataaugmentationapproachforlowresourcecomplexner/0b69e674-6367-45fa-a4f3-60843206c20e_origin.pdf +3 -0
- aclmaselectivedenoisingbasedgenerativedataaugmentationapproachforlowresourcecomplexner/full.md +0 -0
- aclmaselectivedenoisingbasedgenerativedataaugmentationapproachforlowresourcecomplexner/images.zip +3 -0
- aclmaselectivedenoisingbasedgenerativedataaugmentationapproachforlowresourcecomplexner/layout.json +3 -0
- activelysupervisedclusteringforopenrelationextraction/b96a6d17-18bc-44ec-b68e-7c0258200d66_content_list.json +3 -0
- activelysupervisedclusteringforopenrelationextraction/b96a6d17-18bc-44ec-b68e-7c0258200d66_model.json +3 -0
- activelysupervisedclusteringforopenrelationextraction/b96a6d17-18bc-44ec-b68e-7c0258200d66_origin.pdf +3 -0
- activelysupervisedclusteringforopenrelationextraction/full.md +387 -0
- activelysupervisedclusteringforopenrelationextraction/images.zip +3 -0
- activelysupervisedclusteringforopenrelationextraction/layout.json +3 -0
- adaptiveandpersonalizedexercisegenerationforonlinelanguagelearning/1e8e83d4-ba76-454a-bbfe-afb2c136470e_content_list.json +3 -0
- adaptiveandpersonalizedexercisegenerationforonlinelanguagelearning/1e8e83d4-ba76-454a-bbfe-afb2c136470e_model.json +3 -0
- adaptiveandpersonalizedexercisegenerationforonlinelanguagelearning/1e8e83d4-ba76-454a-bbfe-afb2c136470e_origin.pdf +3 -0
- adaptiveandpersonalizedexercisegenerationforonlinelanguagelearning/full.md +440 -0
- adaptiveandpersonalizedexercisegenerationforonlinelanguagelearning/images.zip +3 -0
- adaptiveandpersonalizedexercisegenerationforonlinelanguagelearning/layout.json +3 -0
- adkdattributiondrivenknowledgedistillationforlanguagemodelcompression/7a308754-c09b-4a37-a605-d2c7cd65fe75_content_list.json +3 -0
- adkdattributiondrivenknowledgedistillationforlanguagemodelcompression/7a308754-c09b-4a37-a605-d2c7cd65fe75_model.json +3 -0
- adkdattributiondrivenknowledgedistillationforlanguagemodelcompression/7a308754-c09b-4a37-a605-d2c7cd65fe75_origin.pdf +3 -0
- adkdattributiondrivenknowledgedistillationforlanguagemodelcompression/full.md +471 -0
- adkdattributiondrivenknowledgedistillationforlanguagemodelcompression/images.zip +3 -0
- adkdattributiondrivenknowledgedistillationforlanguagemodelcompression/layout.json +3 -0
- advancingmulticriteriachinesewordsegmentationthroughcriterionclassificationanddenoising/654811cb-0e2a-4224-92f1-ee372104182c_content_list.json +3 -0
- advancingmulticriteriachinesewordsegmentationthroughcriterionclassificationanddenoising/654811cb-0e2a-4224-92f1-ee372104182c_model.json +3 -0
- advancingmulticriteriachinesewordsegmentationthroughcriterionclassificationanddenoising/654811cb-0e2a-4224-92f1-ee372104182c_origin.pdf +3 -0
- advancingmulticriteriachinesewordsegmentationthroughcriterionclassificationanddenoising/full.md +387 -0
- advancingmulticriteriachinesewordsegmentationthroughcriterionclassificationanddenoising/images.zip +3 -0
- advancingmulticriteriachinesewordsegmentationthroughcriterionclassificationanddenoising/layout.json +3 -0
- adynamicprogrammingalgorithmforspanbasednestednamedentityrecognitioninon2/0495ac5b-b465-4228-b5d4-f69d569070af_content_list.json +3 -0
- adynamicprogrammingalgorithmforspanbasednestednamedentityrecognitioninon2/0495ac5b-b465-4228-b5d4-f69d569070af_model.json +3 -0
abductivecommonsensereasoningexploitingmutuallyexclusiveexplanations/15aa9f0b-ccfc-4486-96ee-4b189be9ac2b_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f5bfe6e05e0d917750edf157b6e498c0d16034e8f7b06a080d6d5d852da5f6a0
|
| 3 |
+
size 91272
|
abductivecommonsensereasoningexploitingmutuallyexclusiveexplanations/15aa9f0b-ccfc-4486-96ee-4b189be9ac2b_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:180bd79bda698aa06edc46053ea24d986803e3954c44b67cdf657eb557778ec6
|
| 3 |
+
size 111878
|
abductivecommonsensereasoningexploitingmutuallyexclusiveexplanations/15aa9f0b-ccfc-4486-96ee-4b189be9ac2b_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d7036e3081ebd04a56acb039b77dd5978aed0d81092243841ad4982476bf8863
|
| 3 |
+
size 535373
|
abductivecommonsensereasoningexploitingmutuallyexclusiveexplanations/full.md
ADDED
|
@@ -0,0 +1,361 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Abductive Commonsense Reasoning Exploiting Mutually Exclusive Explanations
|
| 2 |
+
|
| 3 |
+
Wenting Zhao and Justin T. Chiu and Claire Cardie and Alexander M. Rush
|
| 4 |
+
|
| 5 |
+
Department of Computer Science
|
| 6 |
+
|
| 7 |
+
Cornell University
|
| 8 |
+
|
| 9 |
+
{wz346, jtc257, ctc9, arush}@cornell.edu
|
| 10 |
+
|
| 11 |
+
# Abstract
|
| 12 |
+
|
| 13 |
+
Abductive reasoning aims to find plausible explanations for an event. This style of reasoning is critical for commonsense tasks where there are often multiple plausible explanations. Existing approaches for abductive reasoning in natural language processing (NLP) often rely on manually generated annotations for supervision; however, such annotations can be subjective and biased. Instead of using direct supervision, this work proposes an approach for abductive commonsense reasoning that exploits the fact that only a subset of explanations is correct for a given context. The method uses posterior regularization to enforce a mutual exclusion constraint, encouraging the model to learn the distinction between fluent explanations and plausible ones. We evaluate our approach on a diverse set of abductive reasoning datasets; experimental results show that our approach outperforms or is comparable to directly applying pretrained language models in a zero-shot manner and other knowledge-augmented zero-shot methods.
|
| 14 |
+
|
| 15 |
+
# 1 Introduction
|
| 16 |
+
|
| 17 |
+
Abductive reasoning aims to find plausible explanations for an event (Paul, 1993). Unlike deduction, which draws a firm conclusion from a set of premises, abduction requires reasoning from an outcome to plausible explanations. Fig. 1 (top) demonstrates the distinction: given only the context $x$ , both the blue and the red sentences describe possible subsequent events; however, upon seeing the outcome $y$ only one of the two is a plausible explanation (although there may be others). Humans apply abduction in everyday situations (Andersen, 1973) such as reading-between-the-lines (Charniak and Shimony, 1990) and analyzing causes and effects (Thagard and Shelley, 1997; Pearl and Mackenzie, 2018). Learning to perform abduction is thus an important step towards building human-like machines with commonsense knowledge.
|
| 18 |
+
|
| 19 |
+

|
| 20 |
+
Figure 1: Top: An abductive reasoning example consisting of a context $x$ , an outcome $y$ , and two candidate explanations. The goal is to identify the plausible explanation given $x$ and $y$ . To predict an explanation, one can apply a pretrained language model (shown as LM) to score $y$ given $x$ and an explanation, and then compute the posterior probability for the explanation. Bottom: Using a LM without fine-tuning (Zero-shot) leads to poor performance, whereas a LM fine-tuned via max-marginal likelihood (Tuned) fails to distinguish the two explanations. LiPoR is trained to partition the explanations in a mutually exclusive manner.
|
| 21 |
+
|
| 22 |
+
Abductive reasoning has been extensively studied in the setting where annotations are available (Storks et al., 2019). However, because determining whether an explanation is plausible is a subjective and noisy process, annotating plausibility of explanations can be problematic for commonsense reasoning problems. Zhang et al. (2020) show that, in a dataset verification step where five annotators are asked to determine whether a handwritten explanation is plausible, they disagree with each other on $62.34\%$ of 1365 explanations. This subjectivity thus introduces annotator-specific bias as has been seen in related tasks (Elazar et al., 2021;
|
| 23 |
+
|
| 24 |
+
Geva et al., 2019). The potential bias in plausibility annotation motivates the study of learning to perform abductive reasoning without plausibility annotations. Thus, we consider the setting where the context $x$ and outcome $y$ are observed, and models must learn to identify plausible explanations out of a given set of candidate explanations, without direct supervision over plausibility.
|
| 25 |
+
|
| 26 |
+
Rule-based methods use formal logic to reason about explanations (Paul, 1993); however, their limited coverage prevents them from scaling to the full complexity of natural language. Recently, pretrained language models, which have achieved remarkable performance on a range of NLP tasks (Li et al., 2020; Wei et al., 2022a), hold the potential for zero-shot abductive reasoning. Specifically, Bhagavatula et al. (2019) directly estimate the probability of an explanation for an outcome through Bayes' Rule (Zero-shot in Fig. 1). In practice, however, this direct approach can often lead to performance that is only slightly better than random guessing (Zhang et al., 2020; Zhou et al., 2021b).
|
| 27 |
+
|
| 28 |
+
To avoid these issues, we reduce abductive reasoning down to a single constraint — an explanation must be plausible or implausible. This restriction, argued by Gordon and Hobbs (2017), enforces that explanations are mutually exclusive; that is, one explanation being plausible automatically rules out some other explanations. We introduce Likelihood learning with Posterior Regularization (LiPoR), an approach to perform abductive reasoning that only leverages mutual exclusivity of explanations and does not rely on plausibility annotations. Specifically, we maximize the marginal likelihood of the outcome given the context and a set of explanations (Tuned in Fig 1), then use posterior regularization to enforce mutual exclusion between plausible and implausible explanations (LiPoR in Fig 1). We show how to impose this relation with a simple distributional constraint on the posterior of the model.
|
| 29 |
+
|
| 30 |
+
We empirically evaluate LiPoR on a diverse set of abductive reasoning datasets. Specifically, we consider four datasets under the abductive reasoning framework: $\alpha$ NLI (Bhagavatula et al., 2019), Sen-Making (Wang et al., 2019), $\delta$ -NLI (Rudinger et al., 2020), and WinoWhy (Zhang et al., 2020). Results show that LiPoR consistently outperforms pretrained language models directly applied in a zero-shot manner and is comparable to different variants of a state-of-the-art knowledge-augmented
|
| 31 |
+
|
| 32 |
+
zero-shot method (Ma et al., 2021). As human-written explanation candidates are not always available during fine-tuning, we further evaluate LiPoR on the explanation candidates generated via prompting (Brown et al., 2020). We show that, even though automatically generated explanations are noisy, LiPoR can still leverage them and outperform strong zero-shot models including GPT3.
|
| 33 |
+
|
| 34 |
+
# 2 Related Work
|
| 35 |
+
|
| 36 |
+
Zero-shot commonsense reasoning. We categorize zero-shot approaches for commonsense reasoning into two groups. The first group uses pretrained language models as a source of world knowledge. Shwartz et al. (2020); Zhou et al. (2021a) query the language models with information seeking questions to identify background knowledge relevant to specific examples, and the answers returned by the models are later used as additional information for producing the final outputs. Dou and Peng (2022) convert multiple-choice QA to cloze-style sentences and have the language models score different answers. Qin et al. (2020) proposed a decoding algorithm that generates free-form explanations by considering the future contexts through backpropagation. Our approach also uses pretrained language models as a source of knowledge, but we perform additional maximum likelihood fine-tuning to fit the abductive task data.
|
| 37 |
+
|
| 38 |
+
The second group leverages external knowledge bases (KBs). Bosselut et al. (2021) leverage COMET (Bosselut et al., 2019), a dynamic knowledge graph, to generate a chain of commonsense inferences based on contexts of QA examples, which can be treated as explanations. Banerjee and Baral (2020); Ma et al. (2021) pretrain language models on artificial question answering (QA) datasets, created from knowledge graphs; a system trained on such datasets can directly perform zero-shot QA. Huang et al. (2021) formulate multiple-choice QA as natural language inference (NLI) and leverage both existing NLI datasets and KBs to identify answer choices in a zero-shot manner.
|
| 39 |
+
|
| 40 |
+
Relation to deductive reasoning. Both abduction and deduction have intermediate explanations. Abductive reasoning infers the most likely explanation from outcomes. In contrast, deductive reasoning infers a conclusion given a complete set of premises. However, outcomes are often not a direct result of premises but come from a chain of reasoning over intermediate explanations. Identifying and
|
| 41 |
+
|
| 42 |
+
providing the correct chain of reasoning is crucial to building trustworthy systems.
|
| 43 |
+
|
| 44 |
+
Within the realm of deduction there are several different approaches that utilize neural models. Bostrom et al. (2021) develop a pipeline to automatically construct training examples from Wikipedia, so that a system trained on such data is able to generate deductive inferences from natural language inputs without direct human supervision. Arabshahi et al. (2021) present a neuro-symbolic theorem prover that extracts intermediate reasoning steps for understanding conversations. Rajani et al. (2019); Tafjord et al. (2021); Nye et al. (2022); Wei et al. (2022b) collect human annotated explanations for training interpretable systems which first generate intermediate explanations and then produce the final task outputs.
|
| 45 |
+
|
| 46 |
+
Explanations as latent variables. Modeling intermediate explanations as latent variables is a common approach, although training and inference details differ. Here we consider representative works in NLP. Zhou et al. (2020) apply a latent variable model to language understanding and train the model with variational expectation maximization. Their method can generate free-form explanations but requires a small set of labeled examples for supervision. Zhou et al. (2021b) apply such a model to probe dialogue generation in a zero-shot manner. Vig et al. (2020) apply a latent variable model to analyze gender bias in large pretrained language models by viewing the behaviors of neurons as unobserved explanations. Lei et al. (2016); Vafa et al. (2021) apply such a model to identify rationales for sequence classification/generation, where rationales are a minimal subset of inputs or previous words that can lead to the same predictions. LiPoR is a training scheme developed for learning such latent-variable models for abductive reasoning, which has a unique challenge of identifying multiple plausible explanations.
|
| 47 |
+
|
| 48 |
+
# 3 Abductive Reasoning
|
| 49 |
+
|
| 50 |
+
We consider four datasets that test abductive reasoning skills. While abduction can be difficult to pinpoint, we select datasets that obey the following criteria: there is a need for differentiating plausible explanations from implausible explanations, there is an observed outcome, and the outcome depends on intermediate explanations. Based on these criteria, we use $\alpha$ NLI (Bhagavatula et al., 2019), Sen-Making (Wang et al., 2019), $\delta$ -NLI (Rudinger et al.,
|
| 51 |
+
|
| 52 |
+
<table><tr><td rowspan="3">αNLI</td><td>x: it was a very hot summer day</td></tr><tr><td>z: {he decided to run in the heat, he drank a glass of ice cold water}</td></tr><tr><td>y: he felt much better</td></tr><tr><td rowspan="2">Sen-Making</td><td>z: {a restaurant does not have doctors or medical treatment, a restaurant is usually too noisy for a patient, there are different types of restaurants in the city}</td></tr><tr><td>y: it is not true that he was sent to a restaurant for treatment</td></tr><tr><td rowspan="3">δ-NLI</td><td>x: four people and a child walking in the street</td></tr><tr><td>z: {people from all over the world are gathered in the area, the people buy cotton candy from a booth, the family is the only humans in the area, the family is walking their dog}</td></tr><tr><td>y: the family is enjoying the world's fair</td></tr><tr><td rowspan="3">WinoWhy</td><td>x: the fish ate the worm, it was hungry</td></tr><tr><td>z: {hungry staff tend to eat, worm is one being eaten, the worm is a common name for a variety of fish}</td></tr><tr><td>y: therefore, it refers to the fish</td></tr></table>
|
| 53 |
+
|
| 54 |
+
Table 1: Example conversions from different datasets. Every dataset comes with candidate explanations (shown in the pink cells), and only a subset of them are plausible explanations (shown in boldface). We set $x$ in the Sen-Making dataset to empty.
|
| 55 |
+
|
| 56 |
+
2020), and WinoWhy (Zhang et al., 2020) as our target datasets.
|
| 57 |
+
|
| 58 |
+
To convert each to the abduction format, we first identify a context $x$ , which sets a scope for candidate explanations $\mathcal{Z}$ , as well as an outcome $y$ . The outcome could either be an event caused by $z$ or a conclusion reached by $z$ . Importantly, we differentiate explanation candidates $\mathcal{Z}$ as ones that are consistent with $x$ , from plausible explanations that are consistent with both $x$ and $y$ . A central assumption is that training abductive reasoning systems with the candidate set introduces less noise and subjectivity than directly supervising the systems with plausibility annotations.
|
| 59 |
+
|
| 60 |
+
Example conversions of each dataset are shown in Table 1. Because $\alpha$ NLI is designed as an abduction task, the conversion is straightforward. Sen-Making is a benchmark that tests if a system can identify the reason why a statement is against common sense. In this case, a context is not required. We turn the nonsensical statement into a negative sentence, which becomes $y$ . Then the original answer choices become $z$ . $\delta$ -NLI is a defeasible in-
|
| 61 |
+
|
| 62 |
+
ference task, which requires deciding whether new evidence has strengthened or weakened the original hypothesis. $\delta$ -NLI is made of extensions to three existing inference datasets: SNLI (Bowman et al., 2015), ATOMIC (Sap et al., 2019), and SOCIALCHEM-101 (Forbes et al., 2020); each of them will be referred to as $\delta-N$ for brevity, where $N$ can be replaced by a dataset name. We map premises and hypotheses to contexts and outcomes, respectively. We then turn updates that strengthen a hypothesis into a plausible explanation and updates that weaken a hypothesis into an implausible explanation. WinoWhy is a follow-up task for Winograd Schema Challenge (WSC) (Levesque et al., 2012): Given the pronoun coreference resolution question and the answer from a WSC example, WinoWhy seeks to select all plausible reasons for why the pronoun is resolved to the answer. We thus turn the question of the WSC example into a context $x$ and the answer into a declarative sentence $y$ .
|
| 63 |
+
|
| 64 |
+
Notably these datasets differ in the number of plausible explanations, which we denote by a value $m \geq 1$ . In $\alpha$ NLI and Sen-Making, $m$ is fixed to 1 for all examples. However, in $\delta$ -NLI and WinoWhy, $m$ is variable, and we assume that half of explanations are plausible. However these explanations are discrete; an explanation is either plausible or implausible. A successful unsupervised system should assign high probabilities to plausible explanations and low probabilities to implausible explanations. This discreteness is encoded into some of the tasks directly. For example, Bhagavatula et al. (2019); Zhang et al. (2020) instruct the annotators to make minimal possible changes to plausible explanations to produce implausible explanations, so that a system would fail if it predicts explanations based on superficial lexical features.
|
| 65 |
+
|
| 66 |
+
# 4 LiPoR
|
| 67 |
+
|
| 68 |
+
We now describe LiPoR, a method to adapt pretrained language models to incorporate mutual exclusivity between explanations. As we have seen, an abductive reasoning example consists of a context $x$ , an observed outcome $y$ , and an unobserved explanation $z \in \mathcal{Z}$ , which, together with $x$ , has led to $y$ . Importantly, the candidate set of explanations $\mathcal{Z}$ is given during training but the plausibility of each explanation is not. The goal of abductive
|
| 69 |
+
|
| 70 |
+
reasoning is to produce a distribution over explanations $z$ , defined by $p(z|x,y)$ . We are interested in modeling the joint distribution $p(y,z|x)$ , which is factored as follows:
|
| 71 |
+
|
| 72 |
+
$$
|
| 73 |
+
p (y, z | x) = p (y | x, z) p (z | x) \tag {1}
|
| 74 |
+
$$
|
| 75 |
+
|
| 76 |
+
Given Eq 1, the posterior distribution can be obtained via the Bayes' rule,
|
| 77 |
+
|
| 78 |
+
$$
|
| 79 |
+
p (z | x, y) = \frac {p (y | z , x) p (z | x)}{p (y | x)}. \tag {2}
|
| 80 |
+
$$
|
| 81 |
+
|
| 82 |
+
Because $x$ itself does not provide further information for $z$ , we set $p(z|x)$ to be a uniform distribution. Therefore, we only parameterize $p(y|x,z)$ .
|
| 83 |
+
|
| 84 |
+
# 4.1 Baseline: Fine-tuning via Max-marginal Likelihood
|
| 85 |
+
|
| 86 |
+
We note that any off-the-shelf pretrained language model can be applied to evaluate $p(z|x,y)$ for an abductive reasoning task in a zero-shot fashion. To adapt the pretrained model to a specific task distribution without plausibility annotations, we maximize the following marginal likelihood function $\mathcal{L}(\cdot)$ with respect to parameters $\theta$ for all examples:
|
| 87 |
+
|
| 88 |
+
$$
|
| 89 |
+
\mathcal {L} (\theta) = \log \sum_ {z \in \mathcal {Z}} p _ {\theta} (y | x, z) p (z | x). \tag {3}
|
| 90 |
+
$$
|
| 91 |
+
|
| 92 |
+
Maximizing the marginal likelihood encourages the model to prefer explanations that assign the outcome high probability. Mechanically, the marginal likelihood requires computing the probability of the outcome given every explanation in the set $\mathcal{Z}$ . Training then gives credit (gradient) to explanations that assign high probability to the outcome, encouraging the model to prefer explanations that explain the outcome. We parameterize $p(y|x,z)$ by $\theta$ , a language model, that takes " $x[\mathrm{SEP}]z$ as input and returns a probability distribution over $y$ . By optimizing this objective, we find $\theta$ under which $p(y|x)$ has a high likelihood, thus shifting the pretrained model to the new task-specific distribution. Furthermore, this objective does not require plausibility annotations for explanations.
|
| 93 |
+
|
| 94 |
+
# 4.2 Incorporating Mutual Exclusivity
|
| 95 |
+
|
| 96 |
+
The goal of abductive reasoning is to separate out plausible and implausible explanations. However, we note that $\mathcal{L}(\theta)$ itself only maximizes $p(y|x)$ . In practice, this does not require the model to learn any distinctions between explanations, and we observe that in practice the approach learns
|
| 97 |
+
|
| 98 |
+

|
| 99 |
+
Figure 2: Entropy of $p(z|x,y)$ on $\alpha$ NLI at different training steps. The orange line and the blue line represent with and without PR, respectively. Without PR the model never learns to distinguish between explanations.
|
| 100 |
+
|
| 101 |
+

|
| 102 |
+
Figure 3: Visualization of $\Omega(\cdot)$ for $|\mathcal{Z}| = 3$ and $m = 2$ . The lighter colors correspond to larger values. This constraint penalizes models that select too many plausible explanations.
|
| 103 |
+
|
| 104 |
+
to treat them all as plausible. The blue line in Fig 2 shows the entropy of $p(z|x,y)$ on the $\alpha$ NLI dataset when fine-tuning a model with $\mathcal{L}(\theta)$ . We note that a uniform distribution of two categories has approximately an entropy of 0.6931, the upper bound on the entropy of $p(z|x,y)$ for the $\alpha$ NLI examples. Fine-tuning via max-marginal likelihood alone yields an entropy close to the upper bound, meaning the model believes that different $z$ explain $y$ equally well.
|
| 105 |
+
|
| 106 |
+
To impose the mutual exclusivity among explanations, we apply posterior regularization (PR), which places soft constraints on posterior distributions (Ganchev et al., 2010). The posterior regularized likelihood shows as follows:
|
| 107 |
+
|
| 108 |
+
$$
|
| 109 |
+
\mathcal {L} _ {P R} (\theta) = \mathcal {L} (\theta) - \lambda \Omega \left(p _ {\theta} (z | x, y)\right). \tag {4}
|
| 110 |
+
$$
|
| 111 |
+
|
| 112 |
+
To enforce a model to prefer specific explanations over the others, we choose $\Omega : \mathcal{R}^{|\mathcal{Z}|} \to \mathcal{R}$ to be the
|
| 113 |
+
|
| 114 |
+
following function, proposed in Chen et al. (2020):
|
| 115 |
+
|
| 116 |
+
$$
|
| 117 |
+
\Omega (p (z | x, y)) = \max \left(H \left(p _ {\theta} (z | x, y)\right), \ln (m)\right) \tag {5}
|
| 118 |
+
$$
|
| 119 |
+
|
| 120 |
+
$H(\cdot)$ is the entropy function. In Fig. 3, we plot $\Omega(\cdot)$ when $|\mathcal{Z}| = 3$ and $m = 2$ , which shows that distributions with a non-zero probability for the third explanation have larger $\Omega$ values. $\Omega(\cdot)$ thus penalizes a posterior distribution that has an entropy higher than $\ln(m)$ , which sets an upper bound at the entropy of a distribution whose probability mass collapses to $m$ categories. When $m = 1$ , $\Omega(\cdot)$ reduces to
|
| 121 |
+
|
| 122 |
+
$$
|
| 123 |
+
\Omega (p (z | x, y)) = H \left(p _ {\theta} (z | x, y)\right). \tag {6}
|
| 124 |
+
$$
|
| 125 |
+
|
| 126 |
+
The orange line in Fig. 2 shows that incorporating $\Omega(\cdot)$ enables the model to differentiate between different explanations. Notice that, except for $m = 1$ , there is no guarantee that $\Omega(\cdot)$ penalizes all distributions that have probability mass in more than $m$ categories, but we will empirically justify that $\Omega(\cdot)$ eliminates undesired posterior distributions.
|
| 127 |
+
|
| 128 |
+
# 5 Experimental Setup
|
| 129 |
+
|
| 130 |
+
Metrics. Accuracy is used to evaluate a system's predictive power. For datasets with $m = 1$ , accuracy is computed with regards to each example (i.e., whether the plausible explanation has been identified for each example). Otherwise, to stay consistent with evaluation in prior works, we compute accuracy with regards to each explanation (i.e., whether the plausibility of each explanation is correctly predicted). Therefore, more weight will be given to the instances that have larger $|\mathcal{Z}|$ (within a single dataset, the variance of $|\mathcal{Z}|$ for different examples is very small).
|
| 131 |
+
|
| 132 |
+
Baselines. We consider three groups of baselines: (1) methods that do not rely on plausibility annotations (shown as w/o annotations), (2) pretrained language models fine-tuned with plausibility annotations (shown as w/ annotations), and (3) methods that incorporate external knowledge bases (shown as "w/ KBs"). For (1), we first consider previous best published results achieved by a RoBERTa-large model for $\alpha$ NLI (Ma et al., 2021), by a BERT model for Sen-Making (Wang et al., 2019), and a GPT-small model for WinoWhy (Zhang et al., 2020) (all abbreviated as Prev. Best). Additionally, we use GPT-Neo (Black et al., 2021), GPT3 (text-davinci-002) (Brown et al., 2020), and the
|
| 133 |
+
|
| 134 |
+
<table><tr><td></td><td></td><td>αNLI</td><td>Sen-Making</td><td>δ-ATOMIC</td><td>δ-SNLI</td><td>δ-SOCIAL</td><td>WinoWhy</td></tr><tr><td rowspan="6">w/o annotations</td><td>Previous Best</td><td>65.50</td><td>45.60</td><td>-</td><td>-</td><td>-</td><td>56.37</td></tr><tr><td>ZS GPT-NEO</td><td>57.47</td><td>29.80</td><td>47.53</td><td>45.38</td><td>51.69</td><td>59.13</td></tr><tr><td>ZS GPT3</td><td>67.54</td><td>43.00</td><td>50.73</td><td>49.69</td><td>49.22</td><td>50.99</td></tr><tr><td>ZS BART</td><td>50.96</td><td>47.80</td><td>59.05</td><td>55.12</td><td>52.58</td><td>45.69</td></tr><tr><td>Tuned BART</td><td>57.40</td><td>63.50</td><td>67.49</td><td>64.76</td><td>53.88</td><td>55.32</td></tr><tr><td>LiPoR</td><td>71.56</td><td>65.50</td><td>76.82</td><td>65.26</td><td>57.19</td><td>69.88</td></tr><tr><td>w/ annotations</td><td>RoBERTa</td><td>85.60</td><td>93.10</td><td>78.30</td><td>81.60</td><td>86.20</td><td>75.04</td></tr><tr><td rowspan="5">w/ KB</td><td>KDDC-ATOMIC (N)</td><td>70.80</td><td>51.00</td><td>75.90</td><td>69.83</td><td>64.49</td><td>42.44</td></tr><tr><td>KDDC-CWWV (N)</td><td>70.00</td><td>45.70</td><td>62.48</td><td>63.24</td><td>62.90</td><td>40.45</td></tr><tr><td>KDDC-CSKG (N)</td><td>70.50</td><td>49.60</td><td>72.20</td><td>69.93</td><td>63.80</td><td>44.05</td></tr><tr><td>QNLI-ATOMIC (N)</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>73.47</td></tr><tr><td>Previous Best (Y)</td><td>87.30</td><td>95.00</td><td>-</td><td>-</td><td>-</td><td>87.55</td></tr></table>
|
| 135 |
+
|
| 136 |
+
Table 2: Accuracy for identifying plausible explanations using methods with and without plausibility annotations. On each dataset, we boldface the best result within the methods without annotations. Suffix (Y) / (N) denotes whether a knowledge-augmented method uses (Y) or does not use (N) annotations, respectively.
|
| 137 |
+
|
| 138 |
+
BART-large model (Lewis et al., 2020) to directly score $x$ [SEP] $z$ [SEP] $y$ for each $z$ in a zero-shot (ZS) manner. We threshold the outputs of these models in the same way as done in our method to choose the plausible explanations. Finally, we consider BART fine-tuned with Eq. 3 (Tuned BART) as a baseline to better understand the role of posterior regularization. For (2), a RoBERTa-large model (Liu et al., 2019) is fine-tuned with plausibility annotations (abbreviated as RoBERTa). For this baseline, we refer to the best result in the literature: Ma et al. (2021) for $\alpha$ NLI, Wang et al. (2020) for Sen-Making, Rudinger et al. (2020) for $\delta$ -NLI, and Zhang et al. (2020) for WinoWhy. For (3), we run different variants of Knowledge-driven Data Construction (abbreviated as KDDC) (Ma et al., 2021), a method that leverages external knowledge but not plausibility annotations. We note that KDDC is designed to predict a single correct answer with argmax. To handle the datasets that have more than one correct answer, we modify KDDC to choose the answers that have scores higher than the median. We also include Knowledge-Enabled Natural Language Inference (Huang et al., 2021) that is first supervised on QNLI (Wang et al., 2018) and then incorporates ATOMIC at inference time for WinoWhy (abbreviated as QNLI-ATOMIC). For models that use both external knowledge and plausibility annotations, we take RAINBOW (Raina et al., 2005) for $\alpha$ NLI, ECNU-SenseMaker (Zhao et al., 2020) for Sen-Making, and RoBERTa-Grande (Zhang et al., 2020) for WinoWhy.
|
| 139 |
+
|
| 140 |
+
Prompt for plausible explanations: Provide a brief explanation for why it is not sensible that $y$ Prompt for implausible explanations: Provide a brief explanation for why $y$
|
| 141 |
+
|
| 142 |
+
$y$ : He poured orange juice on his cereal.
|
| 143 |
+
|
| 144 |
+
In: Provide a brief explanation for why it is not sensible that he poured orange juice on his cereal.
|
| 145 |
+
|
| 146 |
+
Out: It is not sensible because orange juice does not go well with cereal.
|
| 147 |
+
|
| 148 |
+
In: Provide a brief explanation for why he poured orange juice on his cereal
|
| 149 |
+
|
| 150 |
+
Out: He wanted to eat a healthy breakfast.
|
| 151 |
+
|
| 152 |
+
Figure 4: Prompts for producing competing explanations, followed by an example generation.
|
| 153 |
+
|
| 154 |
+
Implementation & Hyperparameters. We choose a BART-large model (Lewis et al., 2020) to be $\theta$ . We train the model with the Hugging Face Transformers framework (Wolf et al., 2020). We perform grid search with learning rates $\{1e-6, 3e-6, 5e-6, 1e-5\}$ , batch sizes $\{2,4,8,16\}$ , and $\lambda$ $\{1e-2,1e-1,1,10\}$ . We train 50 epochs for WinoWhy and 10 epochs for all other datasets. We perform evaluation on dev sets every 500 steps. We choose the checkpoint whose posterior distributions have the lowest average entropy on dev sets to run tests if the entropy starts to diverge during training. If the entropy converges, we choose the checkpoint at the end of training.
|
| 155 |
+
|
| 156 |
+
Because there are no train/dev/test sets for WinoWhy, to perform a direct comparison with other methods, we do not split the dataset ourselves and simply train models on all of the data and choose the checkpoint based on loss values.
|
| 157 |
+
|
| 158 |
+
Automatic Candidate Generation. LiPoR assumes access to a candidate explanation set $\mathcal{Z}$ during training with human-written explanations. However, we may also want to use the model in domains without a candidate set. We consider a variant that uses a noisy automatic candidate generation process. In this setting, set $\tilde{\mathcal{Z}}$ will contain a set of explanations with no guarantee that any are plausible.
|
| 159 |
+
|
| 160 |
+
To generate $\tilde{\mathcal{Z}}$ we utilize language model prompting with GPT3 (text-davinci-002) (Brown et al., 2020). Using prompt templates inspired by the instructions given to human annotators, we have the model generate explanations. We show example prompts for the Sen-Making dataset in Fig. 4. For datasets with fewer than 1000 unique contexts $x$ (i.e., $\delta$ -NLI and Winowhy), we generate one plausible explanation and one implausible explanation for every $x$ . For the other datasets, we randomly sample 1000 unique contexts and otherwise stay the same. We release the prompts as well as the generated explanations for every dataset in the supplementary materials.
|
| 161 |
+
|
| 162 |
+
In this setting, LiPoR uses a lower PR penalty $\lambda = 0.1$ . We additionally consider two more baselines. First, we score the most plausible explanation with the prompt as a prefix (denoted as Prompted GPT3). Secondly, we supervise RoBERTa-large with the generated explanations.
|
| 163 |
+
|
| 164 |
+
# 6 Results
|
| 165 |
+
|
| 166 |
+
We summarize the results in Table 2. First of all, LiPoR produces the best results compared to all other methods without plausibility annotations, including GPT3 which has many more parameters and is pretrained on more data. We note that LiPoR consistently outperforms Tuned BART, suggesting that posterior regularization plays a positive role in selecting plausible explanations. Compared to knowledge-augmented methods without plausibility annotations, LiPoR is able to produce better results on $\alpha$ NLI, Sen-Making, and $\delta$ -ATOMIC. We note that $\delta$ -NLI is in part created from knowledge bases, and therefore KDDC-\* is particularly good at $\delta$ -ATOMIC, $\delta$ -SNLI, and $\delta$ -SOCIAL, but fails on WinoWhy and Sen-Making. Additionally, QNLI-ATOMIC outperforms LiPoR by 4 points on Winowhy, but this improvement is expected given how much related task data it was pretrained on. Finally, LiPoR still cannot match the performance of RoBERTa trained with plausibility annotations.
|
| 167 |
+
|
| 168 |
+
In Table 4, we show the confusion matrices for comparing among ZS BART, Tuned BART, and LiPoR on the $\alpha$ NLI test set. Tuned BART and LiPoR make the same predictions on a majority of examples, and on the instances they disagree, LiPoR is able to correctly identify plausible explanations on twice as many examples. We also observe a similar trend for ZS BART and Tuned BART.
|
| 169 |
+
|
| 170 |
+
Fine-tuning with Generated Explanations. Table 3 compares LiPoR fine-tuned with generated explanation candidates to the best performing methods without plausibility annotations. Even with noisy candidate sets, LiPoR is still able to leverage such data. It outperforms zero-shot GPT3 methods and improves over Prompted GPT3. Additionally, LiPoR is more robust than RoBERTa trained with plausibility annotations when such annotations are noisy. Therefore, even though the generated explanations by themselves correlate weakly with plausibility, they can be used in LiPoR.
|
| 171 |
+
|
| 172 |
+
# 7 Analysis
|
| 173 |
+
|
| 174 |
+
Preserving Plausible Candidates. Models trained to prefer single plausible explanations can become overconfident in their predictions. A major benefit of LiPoR is that it considers multiple plausible candidates. While LiPoR is fine-tuned to favor mutual exclusivity, we find that at test time it remains able to score multiple plausible explanations highly. Table 5 presents two examples in which both explanations are plausible. The RoBERTa model trained with plausibility annotations produces posterior distributions that collapse to one explanation. However, LiPoR can assign significant probability to both explanations.
|
| 175 |
+
|
| 176 |
+
Qualitative Comparison. Table 6 presents a number of examples accompanied with the predictions made by fine-tuning via max-marginal likelihood (-PR) and LiPoR (+PR) side by side. The two examples on the top are among the more difficult abduction examples: the first example requires a model to draw a connection between abstract concepts and concrete objects ("what you love" $\rightarrow$ "taking long warm showers"); the second example requires a model to figure out an inclusion relation (Nepal is a country in Asia). We italicize the words that co-occur across $x$ , $z$ and $y$ , and we speculate that fine-tuning chooses the wrong explanations because of lexical overlap shortcuts. LiPoR, however, was able to correctly flip these predictions with
|
| 177 |
+
|
| 178 |
+
<table><tr><td></td><td>αNLI</td><td>Sen-Making</td><td>δ-ATOMIC</td><td>δ-SNLI</td><td>δ-SOCIAL</td><td>Winowhy</td></tr><tr><td>ZS GPT3</td><td>67.54</td><td>43.00</td><td>50.73</td><td>49.69</td><td>49.22</td><td>50.99</td></tr><tr><td>Prompted GPT3</td><td>49.19</td><td>53.80</td><td>48.23</td><td>51.26</td><td>50.86</td><td>58.10</td></tr><tr><td>LiPoR</td><td>57.50</td><td>61.50</td><td>67.60</td><td>64.40</td><td>55.40</td><td>58.67</td></tr><tr><td>RoBERTa (Y)</td><td>53.71</td><td>61.30</td><td>62.74</td><td>57.81</td><td>51.78</td><td>42.13</td></tr></table>
|
| 179 |
+
|
| 180 |
+
Table 3: Comparing LiPoR to several baselines on automatically generated explanation candidate sets. (Y) indicates that a method uses plausibility annotations.
|
| 181 |
+
|
| 182 |
+
<table><tr><td></td><td>Tuned √</td><td>Tuned X</td><td></td><td>LiPoR √</td><td>LiPoR X</td></tr><tr><td>ZS √</td><td>1140</td><td>419</td><td>Tuned √</td><td>1449</td><td>309</td></tr><tr><td>ZS X</td><td>618</td><td>882</td><td>Tuned X</td><td>767</td><td>534</td></tr></table>
|
| 183 |
+
|
| 184 |
+
Table 4: Left: Comparison between ZS BART and Tuned BART on αNLI. Right: Comparison between Tuned BART and LiPoR. $\{*\}$ √ and $\{*\} \times$ denote the number of instances for which plausible explanations are correctly / incorrectly identified by $\{*\}$ , respectively.
|
| 185 |
+
|
| 186 |
+
<table><tr><td></td><td>Example</td><td>Y</td><td>N</td></tr><tr><td>x:</td><td>Sally went to Italy in the spring.</td><td></td><td></td></tr><tr><td rowspan="2">z:</td><td>Sally took a lot of pictures when she went sightseeing.</td><td>71.7</td><td>50.0</td></tr><tr><td>Sally took pictures at every place she visited.</td><td>28.3</td><td>50.0</td></tr><tr><td>y:</td><td>When she got home, Sally showed her pictures to all her friends.</td><td></td><td></td></tr><tr><td>x:</td><td>Mike didn’t study for a test.</td><td></td><td></td></tr><tr><td rowspan="2">z:</td><td>Mike was normally a good student.</td><td>100</td><td>50.0</td></tr><tr><td>Everyone in class failed the test except for Mike.</td><td>0</td><td>50.0</td></tr><tr><td>y:</td><td>The teacher was very disappointed.</td><td></td><td></td></tr><tr><td>?</td><td>LiPoR assigns close probabilities to the indistinguish-ably likely explanations, while the supervised model collapses to one of the explanations.</td><td></td><td></td></tr></table>
|
| 187 |
+
|
| 188 |
+
high confidence.
|
| 189 |
+
|
| 190 |
+
The two examples on the bottom are those for which Tuned BART fails to identify the plausible explanation because one explanation is short and the other is long. Again, LiPoR is able to correct these mistakes. Furthermore, the probability produced by LiPoR for each explanation also reflects the model's confidence to a certain degree. In the first example, "we met a golden retriever puppy and he played with us" is a much better explanation than "we were rained on," because one does not need to go to a park to experience rain. As a result, the difference between probabilities for the two explanations is $92.2\%$ . For the second example, "we had an amazing time" could refer to
|
| 191 |
+
|
| 192 |
+
Table 5: Comparison between posterior probabilities for each explanation produced by a RoBERTa model trained with plausibility annotations (Y) and LiPoR (N) on individual test examples, respectively.
|
| 193 |
+
|
| 194 |
+
<table><tr><td></td><td>Example</td><td>-PR</td><td>+PR</td></tr><tr><td>x:</td><td>I love taking long warm showers.</td><td></td><td></td></tr><tr><td rowspan="2">z:</td><td>Showers make me sleepy.</td><td>50.3</td><td>6.0</td></tr><tr><td>Doing what you love is important.</td><td>49.7</td><td>94.0</td></tr><tr><td>y:</td><td>That's why I take two of them every day.</td><td></td><td></td></tr><tr><td>x:</td><td>Neil wanted to see the mountains of Asia.</td><td></td><td></td></tr><tr><td rowspan="2">z:</td><td>Neil booked a tripped online.</td><td>47.5</td><td>64.0</td></tr><tr><td>Neil took a trip to see the Rocky moun-tains instead.</td><td>52.5</td><td>36.0</td></tr><tr><td>y:</td><td>Neil loved being so close to the moun-tains in Nepal!</td><td></td><td></td></tr><tr><td colspan="4">Fine-tuning (-PR) looks at superficial word co-occurrences, but LiPoR (+PR) tries to understand the true context.</td></tr></table>
|
| 195 |
+
|
| 196 |
+
<table><tr><td></td><td>Example</td><td>-PR</td><td>+PR</td></tr><tr><td>x:</td><td>We went to the park today.</td><td></td><td></td></tr><tr><td rowspan="2">z:</td><td>We were rained on!</td><td>53.5</td><td>3.9</td></tr><tr><td>We met a golden retriever puppy and he played with us.</td><td>46.5</td><td>96.1</td></tr><tr><td>y:</td><td>I love going to the park!</td><td></td><td></td></tr><tr><td>x:</td><td>Before my lunch time I got a phone call.</td><td></td><td></td></tr><tr><td rowspan="2">z:</td><td>My best friend wanted to go on a trip.</td><td>50.5</td><td>40.9</td></tr><tr><td>My best friend wanted to try a new restaurant for lunch.</td><td>49.5</td><td>59.1</td></tr><tr><td>y:</td><td>We had an amazing time!</td><td></td><td></td></tr><tr><td colspan="4">LiPoR (+PR) is able to correct the bias towards shorter explanations.</td></tr></table>
|
| 197 |
+
|
| 198 |
+
Table 6: Comparison between posterior probabilities for each explanation produced by fine-tuning (-PR) and LiPoR (+PR) on individual test examples, respectively. The two tables consist of examples where LiPoR successfully corrects the mistakes made by fine-tuning. The plausible explanations labeled by human annotators are in boldface.
|
| 199 |
+
|
| 200 |
+
both trying out a new restaurant and going on a trip. The fact that the phone call was received before lunch time makes the second explanation more likely, but the first explanation can still be what actually happened. As a result, LiPoR assigns $40.9\%$ to the "trip" explanation and $59.1\%$ to the "restaurant" explanation, leading to a smaller gap than that of the first example.
|
| 201 |
+
|
| 202 |
+
# 8 Conclusion
|
| 203 |
+
|
| 204 |
+
We introduce LiPoR, which fine-tunes pretrained language models on abductive reasoning tasks without plausibility annotations. Results show that LiPoR achieves comparable performance to that of knowledge-augmented zero-shot methods.
|
| 205 |
+
|
| 206 |
+
# Ethical Statement
|
| 207 |
+
|
| 208 |
+
LiPoR shares similar concerns with other contemporary approaches for performing commonsense reasoning. Specifically, because LiPoR exploits the knowledge already present in pretrained language models, it can potentially reinforce existing harmful biases in such models.
|
| 209 |
+
|
| 210 |
+
# Acknowledgement
|
| 211 |
+
|
| 212 |
+
AR and JC are supported by a Sloan Fellowship, NSF CAREER #2037519, and NSF #1901030. CC and WZ are supported by NSF #1815455.
|
| 213 |
+
|
| 214 |
+
# References
|
| 215 |
+
|
| 216 |
+
Henning Andersen. 1973. Abductive and deductive change. Language, pages 765-793.
|
| 217 |
+
Forough Arabshahi, Jennifer Lee, Mikayla Gawarecki, Kathryn Mazaitis, Amos Azaria, and Tom Mitchell. 2021. Conversational neuro-symbolic commonsense reasoning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 4902-4911.
|
| 218 |
+
Pratyay Banerjee and Chitta Baral. 2020. Self-supervised knowledge triplet learning for zero-shot question answering. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 151-162.
|
| 219 |
+
Chandra Bhagavatula, Ronan Le Bras, Chaitanya Malaviya, Keisuke Sakaguchi, Ari Holtzman, Hannah Rashkin, Doug Downey, Wen-tau Yih, and Yejin Choi. 2019. Abductive commonsense reasoning. In International Conference on Learning Representations.
|
| 220 |
+
Sid Black, Gao Leo, Phil Wang, Connor Leahy, and Stella Biderman. 2021. GPT-Neo: Large Scale Autoregressive Language Modeling with Mesh-Tensorflow. If you use this software, please cite it using these metadata.
|
| 221 |
+
Antoine Bosselut, Ronan Le Bras, and Yejin Choi. 2021. Dynamic neuro-symbolic knowledge graph construction for zero-shot commonsense question answering. In Proceedings of the 35th AAAI Conference on Artificial Intelligence (AAAI).
|
| 222 |
+
Antoine Bosselut, Hannah Rashkin, Maarten Sap, Chaitanya Malaviya, Asli Celikyilmaz, and Yejin Choi.
|
| 223 |
+
|
| 224 |
+
2019. Comet: Commonsense transformers for automatic knowledge graph construction. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 4762-4779.
|
| 225 |
+
Kaj Bostrom, Xinyu Zhao, Swarat Chaudhuri, and Greg Durrett. 2021. Flexible generation of natural language deductions. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 6266-6278.
|
| 226 |
+
Samuel Bowman, Gabor Angeli, Christopher Potts, and Christopher D Manning. 2015. A large annotated corpus for learning natural language inference. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 632-642.
|
| 227 |
+
Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901.
|
| 228 |
+
Eugene Charniak and Solomon E Shimony. 1990. Probabilistic semantics for cost based abduction. In Proceedings of the eighth National conference on Artificial intelligence-Volume 1, pages 106-111.
|
| 229 |
+
Di Chen, Yiwei Bai, Wenting Zhao, Sebastian Ament, John Gregoire, and Carla Gomes. 2020. Deep reasoning networks for unsupervised pattern de-mixing with constraint reasoning. In International Conference on Machine Learning, pages 1500-1509. PMLR.
|
| 230 |
+
Zi-Yi Dou and Nanyun Peng. 2022. Zero-shot commonsense question answering with cloze translation and consistency optimization. In The Thirty-Sixth AAAI Conference on Artificial Intelligence (AAAI).
|
| 231 |
+
Yanai Elazar, Hongming Zhang, Yoav Goldberg, and Dan Roth. 2021. Back to square one: Artifact detection, training and commonsense disentanglement in the winograd schema. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 10486-10500.
|
| 232 |
+
Maxwell Forbes, Jena D Hwang, Vered Shwartz, Maarten Sap, and Yejin Choi. 2020. Social chemistry 101: Learning to reason about social and moral norms. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 653-670.
|
| 233 |
+
Kuzman Ganchev, Joao Graça, Jennifer Gillenwater, and Ben Taskar. 2010. Posterior regularization for structured latent variable models. The Journal of Machine Learning Research, 11:2001-2049.
|
| 234 |
+
Mor Geva, Yoav Goldberg, and Jonathan Berant. 2019. Are we modeling the task or the annotator? an investigation of annotator bias in natural language understanding datasets. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference
|
| 235 |
+
|
| 236 |
+
on Natural Language Processing (EMNLP-IJCNLP), pages 1161-1166.
|
| 237 |
+
Andrew S. Gordon and Jerry R. Hobbs. 2017. Explanation, page 299-305. Cambridge University Press.
|
| 238 |
+
Canming Huang, Weinan He, and Yongmei Liu. 2021. Improving unsupervised commonsense reasoning using knowledge-enabled natural language inference. In *Findings of the Association for Computational Linguistics: EMNLP* 2021, pages 4875-4885.
|
| 239 |
+
Tao Lei, Regina Barzilay, and Tommi Jaakkola. 2016. Rationalizing neural predictions. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 107-117.
|
| 240 |
+
Hector Levesque, Ernest Davis, and Leora Morgenstern. 2012. The winograd schema challenge. In Thirteenth international conference on the principles of knowledge representation and reasoning.
|
| 241 |
+
Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7871-7880.
|
| 242 |
+
Jingjing Li, Zichao Li, Lili Mou, Xin Jiang, Michael Lyu, and Irwin King. 2020. Unsupervised text generation by learning from search. Advances in Neural Information Processing Systems, 33:10820-10831.
|
| 243 |
+
Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692.
|
| 244 |
+
Kaixin Ma, Filip Ilievski, Jonathan Francis, Yonatan Bisk, Eric Nyberg, and Alessandro Oltramari. 2021. Knowledge-driven data construction for zero-shot evaluation in commonsense question answering. In 35th AAAI Conference on Artificial Intelligence.
|
| 245 |
+
Maxwell Nye, Anders Johan Andreassen, Guy Gur-Ari, Henryk Michalewski, Jacob Austin, David Bieber, David Dohan, Aitor Lewkowycz, Maarten Bosma, David Luan, Charles Sutton, and Augustus Odena. 2022. Show your work: Scratchpads for intermediate computation with language models. In Deep Learning for Code Workshop.
|
| 246 |
+
Gabriele Paul. 1993. Approaches to abductive reasoning: an overview. Artificial intelligence review, 7(2):109-152.
|
| 247 |
+
Judea Pearl and Dana Mackenzie. 2018. The book of why: the new science of cause and effect. Basic books.
|
| 248 |
+
|
| 249 |
+
Lianhui Qin, Vered Shwartz, Peter West, Chandra Bhagavatula, Jena D Hwang, Ronan Le Bras, Antoine Bosselut, and Yejin Choi. 2020. Back to the future: Unsupervised backprop-based decoding for counterfactual and abductive commonsense reasoning. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 794-805.
|
| 250 |
+
Rajat Raina, Andrew Y Ng, and Christopher D Manning. 2005. Robust textual inference via learning and abductive reasoning. In AAAI, pages 1099-1105.
|
| 251 |
+
Nazneen Fatema Rajani, Bryan McCann, Caiming Xiong, and Richard Socher. 2019. Explain yourself! leveraging language models for commonsense reasoning. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 4932-4942.
|
| 252 |
+
Rachel Rudinger, Vered Shwartz, Jena D. Hwang, Chandra Bhagavatula, Maxwell Forbes, Ronan Le Bras, Noah A. Smith, and Yejin Choi. 2020. Thinking like a skeptic: Defeasible inference in natural language. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 4661-4675, Online. Association for Computational Linguistics.
|
| 253 |
+
Maarten Sap, Ronan Le Bras, Emily Allaway, Chandra Bhagavatula, Nicholas Lourie, Hannah Rashkin, Brendan Roof, Noah A Smith, and Yejin Choi. 2019. Atomic: An atlas of machine commonsense for if-then reasoning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 3027-3035.
|
| 254 |
+
Vered Shwartz, Peter West, Ronan Le Bras, Chandra Bhagavatula, and Yejin Choi. 2020. Unsupervised commonsense question answering with self-talk. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 4615-4629.
|
| 255 |
+
Shane Storks, Qiaozi Gao, and Joyce Y Chai. 2019. Recent advances in natural language inference: A survey of benchmarks, resources, and approaches. arXiv preprint arXiv:1904.01172.
|
| 256 |
+
Oyvind Tafjord, Bhavana Dalvi, and Peter Clark. 2021. Proofwriter: Generating implications, proofs, and abductive statements over natural language. In *Findings of the Association for Computational Linguistics: ACL-IJCNLP* 2021, pages 3621-3634.
|
| 257 |
+
Paul Thagard and Cameron Shelley. 1997. Abductive reasoning: Logic, visual thinking, and coherence. In *Logic and scientific methods*, pages 413-427. Springer.
|
| 258 |
+
Keyon Vafa, Yuntian Deng, David Blei, and Alexander M Rush. 2021. Rationales for sequential predictions. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 10314-10332.
|
| 259 |
+
|
| 260 |
+
Jesse Vig, Sebastian Gehrmann, Yonatan Belinkov, Sharon Qian, Daniel Nevo, Yaron Singer, and Stuart Shieber. 2020. Investigating gender bias in language models using causal mediation analysis. In Advances in Neural Information Processing Systems, volume 33, pages 12388-12401. Curran Associates, Inc.
|
| 261 |
+
Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel Bowman. 2018. Glue: A multi-task benchmark and analysis platform for natural language understanding. In Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 353-355.
|
| 262 |
+
Cunxiang Wang, Shuailong Liang, Yili Jin, Yilong Wang, Xiaodan Zhu, and Yue Zhang. 2020. SemEval-2020 task 4: Commonsense validation and explanation. In Proceedings of The 14th International Workshop on Semantic Evaluation. Association for Computational Linguistics.
|
| 263 |
+
Cunxiang Wang, Shuai long Liang, Yue Zhang, Xiaonan Li, and Tian Gao. 2019. Does it make sense? and why? a pilot study for sense making and explanation. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 4020-4026.
|
| 264 |
+
Jason Wei, Maarten Bosma, Vincent Zhao, Kelvin Guu, Adams Wei Yu, Brian Lester, Nan Du, Andrew M. Dai, and Quoc V Le. 2022a. Finetuned language models are zero-shot learners. In International Conference on Learning Representations.
|
| 265 |
+
Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Ed Chi, Quoc Le, and Denny Zhou. 2022b. Chain of thought prompting elicits reasoning in large language models. arXiv preprint arXiv:2201.11903.
|
| 266 |
+
Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumont, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander M. Rush. 2020. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Association for Computational Linguistics.
|
| 267 |
+
Hongming Zhang, Xinran Zhao, and Yangqiu Song. 2020. Winowhy: A deep diagnosis of essential commonsense knowledge for answering winograd schema challenge. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 5736-5745.
|
| 268 |
+
Qian Zhao, Siyu Tao, Jie Zhou, Linlin Wang, Xin Lin, and Liang He. 2020. ECNU-SenseMaker at SemEval-2020 task 4: Leveraging heterogeneous knowledge resources for commonsense validation
|
| 269 |
+
|
| 270 |
+
and explanation. In Proceedings of the Fourteenth Workshop on Semantic Evaluation, pages 401-410, Barcelona (online). International Committee for Computational Linguistics.
|
| 271 |
+
Pei Zhou, Behnam Hedayatnia, Karthik Gopalakrishnan, Seokhwan Kim, Jay Pujara, Xiang Ren, Yang Liu, and Dilek Hakkani-Tur. 2021a. Think before you speak: Learning to generate implicit knowledge for response generation by self-talk. In Proceedings of the 3rd Workshop on Natural Language Processing for Conversational AI, pages 251-253, Online. Association for Computational Linguistics.
|
| 272 |
+
Pei Zhou, Pegah Jandaghi, Hyundong Cho, Bill Yuchen Lin, Jay Pujara, and Xiang Ren. 2021b. Probing commonsense explanation in dialogue response generation. In Findings of the Association for Computational Linguistics: EMNLP 2021, pages 4132-4146, Punta Cana, Dominican Republic. Association for Computational Linguistics.
|
| 273 |
+
Wangchunshu Zhou, Jinyi Hu, Hanlin Zhang, Xiaodan Liang, Maosong Sun, Chenyan Xiong, and Jian Tang. 2020. Towards interpretable natural language understanding with explanations as latent variables. Advances in Neural Information Processing Systems, 33:6803-6814.
|
| 274 |
+
|
| 275 |
+
# A Additional Experiments
|
| 276 |
+
|
| 277 |
+
How do models with different architectures and sizes perform at abductive reasoning? Table 7 summarizes the results on the $\alpha$ NLI dataset with different model architectures and model sizes, which are obtained from the same grid search described in Sec. 5. Within the same architecture, models with more parameters are better at abductive reasoning. When comparing between BART and T5, BART can produce consistently better results at each size.
|
| 278 |
+
|
| 279 |
+
Does a learnable $p(z|x)$ model lead to better performance? Here we test if a learnable $p(z|x)$ model instead of a uniform $p(z|x)$ model leads to better performance. We should note that a learnable $p(z|x)$ model may result in reasoning shortcuts: because if the signal from $p(z|x)$ is too strong, then this term will dominate Eq. 2; thus, $p(z|x,y)$ computed in this way is no longer a result of thinking backwards. We parametrize the learnable $p(z|x)$ model by a BART-large model, which takes $x$ as an input and returns a probability distribution over all sequences. Table 8 shows the comparison between the two $p(z|x)$ models on the $\alpha$ NLI dataset. Although the uniform $p(z|x)$ model outperforms the learnable $p(z|x)$ model, the difference between them is not significant.
|
| 280 |
+
|
| 281 |
+
How do methods without plausibility annotations perform in the presence of distractors? In order to test the robustness of different methods without plausibility annotations, we evaluate them on two types of distractors added to the $\alpha$ NLI test set. The first type of distractor randomly samples a third explanation from another example, and the second type of distractor constructs a third explanation with randomly sampled words from the vocabulary of the $\alpha$ NLI dataset with a length that falls in-between the lengths of the two original explanations. Table 8 compares the results with and without the distractors. Notice that after adding a third option, the chance of getting the plausible explanation with a random guess is $\frac{1}{3}$ . LiPoR's accuracy drops significantly with the presence of distractors, while the relative decrease for GPT NEO is smaller. Furthermore, the zero-shot results (i.e., ZS and GPT NEO) suggest that it is more difficult to identify the first type of distractor than the second one. Our interpretation for LiPoR's worse performance on distractors is that the distractors break our assumption: $p(z|x)$ is no longer uniform, and
|
| 282 |
+
|
| 283 |
+
<table><tr><td></td><td>BART</td><td>T5</td></tr><tr><td>small</td><td>-</td><td>54.14</td></tr><tr><td>base</td><td>60.08</td><td>57.31</td></tr><tr><td>large</td><td>71.56</td><td>65.48</td></tr></table>
|
| 284 |
+
|
| 285 |
+
Table 7: Comparison between different model architectures and model sizes on the $\alpha$ NLI dataset.
|
| 286 |
+
|
| 287 |
+
<table><tr><td></td><td>Original</td><td>+Rand. E's</td><td>+Rand. W's</td></tr><tr><td>GPT NEO</td><td>57.47</td><td>51.12</td><td>57.37</td></tr><tr><td>ZS</td><td>50.96</td><td>34.39</td><td>38.22</td></tr><tr><td>LL</td><td>57.40</td><td>53.48</td><td>53.52</td></tr><tr><td>LiPoR w/ unif. p(z|x)</td><td>71.56</td><td>58.58</td><td>57.40</td></tr><tr><td>LiPoR w/ learned p(z|x)</td><td>69.92</td><td>59.14</td><td>59.24</td></tr></table>
|
| 288 |
+
|
| 289 |
+
Table 8: Comparison between different unsupervised approaches on the $\alpha$ NLI test set. +Rand. E's is adding a random explanation taken from another example. +Rand. W's is adding random words from the vocabulary of $\alpha$ NLI whose length is between the lengths of two original explanations. Best results for each setting are in boldface.
|
| 290 |
+
|
| 291 |
+
the probability of a distracting explanation is independent of the probability of $x$ . Therefore, the original factorization in Eq. 1 no longer applies. To build an unsupervised system that is robust to distractors requires incorporating the new assumptions in the data generating process.
|
| 292 |
+
|
| 293 |
+
# A For every submission:
|
| 294 |
+
|
| 295 |
+
A1. Did you describe the limitations of your work?
|
| 296 |
+
|
| 297 |
+
Left blank.
|
| 298 |
+
|
| 299 |
+
A2. Did you discuss any potential risks of your work?
|
| 300 |
+
|
| 301 |
+
Left blank.
|
| 302 |
+
|
| 303 |
+
A3. Do the abstract and introduction summarize the paper's main claims?
|
| 304 |
+
|
| 305 |
+
Left blank.
|
| 306 |
+
|
| 307 |
+
□ A4. Have you used AI writing assistants when working on this paper?
|
| 308 |
+
|
| 309 |
+
Left blank.
|
| 310 |
+
|
| 311 |
+
# B Did you use or create scientific artifacts?
|
| 312 |
+
|
| 313 |
+
Left blank.
|
| 314 |
+
|
| 315 |
+
B1. Did you cite the creators of artifacts you used?
|
| 316 |
+
|
| 317 |
+
Left blank.
|
| 318 |
+
|
| 319 |
+
B2. Did you discuss the license or terms for use and / or distribution of any artifacts?
|
| 320 |
+
|
| 321 |
+
Left blank.
|
| 322 |
+
|
| 323 |
+
B3. Did you discuss if your use of existing artifact(s) was consistent with their intended use, provided that it was specified? For the artifacts you create, do you specify intended use and whether that is compatible with the original access conditions (in particular, derivatives of data accessed for research purposes should not be used outside of research contexts)?
|
| 324 |
+
|
| 325 |
+
Left blank.
|
| 326 |
+
|
| 327 |
+
B4. Did you discuss the steps taken to check whether the data that was collected / used contains any information that names or uniquely identifies individual people or offensive content, and the steps taken to protect / anonymize it?
|
| 328 |
+
|
| 329 |
+
Left blank.
|
| 330 |
+
|
| 331 |
+
B5. Did you provide documentation of the artifacts, e.g., coverage of domains, languages, and linguistic phenomena, demographic groups represented, etc.?
|
| 332 |
+
|
| 333 |
+
Left blank.
|
| 334 |
+
|
| 335 |
+
B6. Did you report relevant statistics like the number of examples, details of train / test / dev splits, etc. for the data that you used / created? Even for commonly-used benchmark datasets, include the number of examples in train / validation / test splits, as these provide necessary context for a reader to understand experimental results. For example, small differences in accuracy on large test sets may be significant, while on small test sets they may not be.
|
| 336 |
+
|
| 337 |
+
Left blank.
|
| 338 |
+
|
| 339 |
+
# C Did you run computational experiments?
|
| 340 |
+
|
| 341 |
+
Left blank.
|
| 342 |
+
|
| 343 |
+
C1. Did you report the number of parameters in the models used, the total computational budget (e.g., GPU hours), and computing infrastructure used?
|
| 344 |
+
|
| 345 |
+
Left blank.
|
| 346 |
+
|
| 347 |
+
The Responsible NLP Checklist used at ACL 2023 is adopted from NAACL 2022, with the addition of a question on AI writing assistance.
|
| 348 |
+
|
| 349 |
+
C2. Did you discuss the experimental setup, including hyperparameter search and best-found hyperparameter values? Left blank.
|
| 350 |
+
C3. Did you report descriptive statistics about your results (e.g., error bars around results, summary statistics from sets of experiments), and is it transparent whether you are reporting the max, mean, etc. or just a single run? Left blank.
|
| 351 |
+
C4. If you used existing packages (e.g., for preprocessing, for normalization, or for evaluation), did you report the implementation, model, and parameter settings used (e.g., NLTK, Spacy, ROUGE, etc.)? Left blank.
|
| 352 |
+
|
| 353 |
+
# D Did you use human annotators (e.g., crowdworkers) or research with human participants?
|
| 354 |
+
|
| 355 |
+
Left blank.
|
| 356 |
+
|
| 357 |
+
D1. Did you report the full text of instructions given to participants, including e.g., screenshots, disclaimers of any risks to participants or annotators, etc.? Left blank.
|
| 358 |
+
D2. Did you report information about how you recruited (e.g., crowdsourcing platform, students) and paid participants, and discuss if such payment is adequate given the participants' demographic (e.g., country of residence)? Left blank.
|
| 359 |
+
D3. Did you discuss whether and how consent was obtained from people whose data you're using/curating? For example, if you collected data via crowdsourcing, did your instructions to crowdworkers explain how the data would be used? Left blank.
|
| 360 |
+
D4. Was the data collection protocol approved (or determined exempt) by an ethics review board? Left blank.
|
| 361 |
+
D5. Did you report the basic demographic and geographic characteristics of the annotator population that is the source of the data? Left blank.
|
abductivecommonsensereasoningexploitingmutuallyexclusiveexplanations/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fa68f10652afbdd5e48d658a9995b9f76e42647821f0025e7611906c51b0c519
|
| 3 |
+
size 474751
|
abductivecommonsensereasoningexploitingmutuallyexclusiveexplanations/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cb9971eacfd1d784edbd69f30d6af6ca41a940af0049907333d2e4aae1c6da84
|
| 3 |
+
size 482046
|
acceleratingtransformerinferencefortranslationviaparalleldecoding/4090c341-5ebb-47f1-b801-0ff756c1c814_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:04f45df0529a9c3e1780916a736717ca57867ce0fb069061998a733e2c4ec4cd
|
| 3 |
+
size 131652
|
acceleratingtransformerinferencefortranslationviaparalleldecoding/4090c341-5ebb-47f1-b801-0ff756c1c814_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:725689dad72c3acda1a6712ad4f0681ac87df0cc94c17bf2d5c0ee23bbdc3f56
|
| 3 |
+
size 159063
|
acceleratingtransformerinferencefortranslationviaparalleldecoding/4090c341-5ebb-47f1-b801-0ff756c1c814_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ad8c5efbf41c76b39ec4b1e752e6538b45c43cc8223e3f497b45d71e36e22d43
|
| 3 |
+
size 1498118
|
acceleratingtransformerinferencefortranslationviaparalleldecoding/full.md
ADDED
|
@@ -0,0 +1,498 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Accelerating Transformer Inference for Translation via Parallel Decoding
|
| 2 |
+
|
| 3 |
+
Andrea Santilli<sup>1</sup>, Silvio Severino<sup>1</sup>, Emilian Postolache<sup>1</sup>, Valentino Maiorca<sup>1</sup>, Michele Mancusi<sup>1</sup>, Riccardo Marin<sup>2,3</sup>, Emanuele Rodolà<sup>1</sup>
|
| 4 |
+
|
| 5 |
+
$^{1}$ Sapienza University of Rome $^{2}$ University of Tübingen
|
| 6 |
+
|
| 7 |
+
$^{3}$ Tübingen AI Center
|
| 8 |
+
|
| 9 |
+
santilli@di.uniroma1.it
|
| 10 |
+
|
| 11 |
+
# Abstract
|
| 12 |
+
|
| 13 |
+
Autoregressive decoding limits the efficiency of transformers for Machine Translation (MT). The community proposed specific network architectures and learning-based methods to solve this issue, which are expensive and require changes to the MT model, trading inference speed at the cost of the translation quality. In this paper, we propose to address the problem from the point of view of decoding algorithms, as a less explored but rather compelling direction. We propose to reframe the standard greedy autoregressive decoding of MT with a parallel formulation leveraging Jacobi and Gauss-Seidel fixed-point iteration methods for fast inference. This formulation allows speeding up existing models without training or modifications while retaining translation quality. We present three parallel decoding algorithms and test them on different languages and models showing how the parallelization introduces a speedup up to $38\%$ w.r.t. the standard autoregressive decoding and nearly 2x when scaling the method on parallel resources. Finally, we introduce a decoding dependency graph visualizer (DDGviz) that lets us see how the model has learned the conditional dependence between tokens and inspect the decoding procedure.
|
| 14 |
+
|
| 15 |
+
# 1 Introduction
|
| 16 |
+
|
| 17 |
+
In recent years there have been dramatic improvements in Machine Translation (MT) (Edunov et al., 2018; Liu et al., 2020) thanks to the transition to neural models and the advent of the Transformer architecture (Vaswani et al., 2017). These models can produce high-quality translations while being extremely parallelizable during training. However, Transformers are used sequentially at inference time, generating one token at a time (i.e., sending each token as input for the next autoregressive iteration). This process of autoregressive inference hampers the efficiency of neural machine translation systems in terms of latency, limiting applications and portability. Considering that these systems are
|
| 18 |
+
|
| 19 |
+
extensively used in production multiple times to produce new translations (e.g., Google Translate $^{1}$ , DeepL Translator $^{2}$ ), even a minor speedup would be beneficial in the long run, especially if the translation is done on embedded devices.
|
| 20 |
+
|
| 21 |
+
To address this issue, the community proposed ad-hoc trained models specific for parallel machine translation under the umbrella term of Non-Autoregressive Machine Translation models (NAT) (Gu et al., 2018). These models produce the translation in parallel but require (i) a complete reengineering of the MT system, (ii) extensive training resources and (iii) complex design choices like distillation from larger autoregressive models. These requirements are quite demanding and not easily satisfiable. For example, production systems are heavily optimized for hardware and software and even introducing a minimal modification requires non-trivial human effort (Wu et al., 2016; Kim et al., 2019). Furthermore, training a new model from scratch is not always possible due to non-released training data or low-resource languages having few or lacking parallel corpora.
|
| 22 |
+
|
| 23 |
+
In this paper, we propose to address the problem of parallel machine translation with an orthogonal approach consisting in novel decoding algorithms that work in parallel and can be used on top of existing autoregressive models for MT. We overcome previous limitations with a flexible and generic method that does not require any modification to the model or costly retraining. Specifically, inspired by previous successes in speeding up feedforward computation for image generation (Song et al., 2021b), we reframe the greedy autoregressive decoding for MT as a system of nonlinear equations solvable in parallel. This simple formulation speeds up the decoding procedure by using fixed-point iteration methods like Jacobi and Gauss-Seidel while having mathematical guarantees on
|
| 24 |
+
|
| 25 |
+

|
| 26 |
+
Figure 1: On the left, the classical Autoregressive Decoding for MT. The target sentence is produced token-by-token sequentially, sending the partial result as input for the next autoregressive iteration up to the length $m$ of the target. On the right Parallel Decoding proposed in this paper. This method changes only the decoding algorithm (orange block) and is usable on top of any autoregressive model without modifications. Parallel Decoding algorithms resolve the whole sentence or a block of $b$ tokens in parallel: initial tokens (PAD tokens) are gradually refined with $k$ steps until a stopping condition is reached. Crucially, $k \leqslant m$ with quality guarantees and overall decoding speedups.
|
| 27 |
+
|
| 28 |
+

|
| 29 |
+
|
| 30 |
+
the quality of the translation. A high-level description of the method is available in (Fig. 1). Our contributions can be summarized as the following:
|
| 31 |
+
|
| 32 |
+
- We reframe the standard greedy autoregressive decoding procedure in MT with a parallel formulation, introducing three parallel decoding algorithms (PJ, PGJ, HGJ) and a stopping condition that preserves translation quality.
|
| 33 |
+
- We perform extensive experiments with different transformer sizes (base and large) and datasets, showing speedups up to $38\%$ in time, obtaining a nearly $2\times$ speedup when scaling the model on parallel resources while preserving quality. To the best of our knowledge, this is one of the first studies to introduce a speedup in multilingual machine translation.
|
| 34 |
+
- We introduce a decoding dependency graph visualizer (DDGviz) to inspect the learned tokens' conditional dependence and when parallel decoding is effective.
|
| 35 |
+
|
| 36 |
+
All the code is publicly released<sup>3</sup>.
|
| 37 |
+
|
| 38 |
+
# 2 Related Work
|
| 39 |
+
|
| 40 |
+
Gu et al. (2018) first introduced Non-Autoregressive Translation models (NAT) as ad-hoc trained models capable of producing the translation all at once in parallel. With NATs, it is possible to consistently reduce the latency and speed up the translation at the expense of a slightly worse translation quality due to the multimodality problem (i.e., we lose the dependency between tokens in the target output). Finding a tradeoff between translation quality and speed is an active
|
| 41 |
+
|
| 42 |
+
research direction, with current methods trying to fill the gap in terms of translation quality (Geng et al., 2021; Savinov et al., 2022). Nevertheless, all proposed NAT models are learning-based and require different tricks to reach the quality of autoregressive models (Gu and Kong, 2021). The most common is the sequence-level knowledge distillation of large autoregressive models into parallel models (Kim and Rush, 2016). Other approaches include defining alternative training objectives (Ghazvininejad et al., 2020a; Saharia et al., 2020; Du et al., 2021; Huang et al., 2021), architectures that model dependencies between output sentence tokens (Ghazvininejad et al., 2019; Qian et al., 2021; Song et al., 2021a; Gu and Kong, 2021; Song et al., 2022) or multi-iteration methods (Ghazvininejad et al., 2020b; Kasai et al., 2020; Hao et al., 2021; Geng et al., 2021; Savinov et al., 2022; Huang et al., 2022; Xia et al., 2022) that apply iterative refinements to a translation, trading some speed for greater quality. In our approach, we also employ iterative refinements of solutions to non-linear equations, but we do not perform any training or modification to the model. Other works that require retraining or modifications to the model add additional decoding heads (Stern et al., 2018) or use shallow decoders (Kasai et al., 2021). We refer the reader to Xiao et al. (2022) for a thorough survey on NAT methods. Further orthogonal approaches use specialized hardware (TPU) with low-precision calculations (Wu et al., 2016) or software optimizations (Kim et al., 2019). In the context of Grammatical Error Correction, Sun et al. (2021) recently proposed aggressive parallel decoding, assuming that the model output is similar to the input. More recently, inspiring our work, Song et al. (2021b) showed that it is possible to parallelize feedforward computations
|
| 43 |
+
|
| 44 |
+

|
| 45 |
+
Figure 2: Parallel Decoding algorithms: PJ resolves the whole sequence in parallel iteratively. PGJ resolves blocks in parallel; once a block is finished, it moves on to the next one and decodes it again in parallel (in figure $b = 3$ ). HGJ decodes the sentence in parallel as PGJ up to a certain length $h$ ; afterwards, it goes autoregressively until [EOS] token is generated. Decoding actually happens in sub-word tokens (not depicted here).
|
| 46 |
+
|
| 47 |
+
by thinking of them as a system of non-linear equations. They parallelized the backpropagation of RNNs, feedforward layers and autoregressive generative models on images. We extend the approach defined on dense pixel prediction to the discrete conditional token generation in MT. While this work was under submission and anonymity period, Leviathan et al. (2022), Chen et al. (2023) and Kim et al. (2023) concurrently proposed decoding approaches that speed up inference of a large transformer model by using another smaller model to draft tokens. Compared to these approaches our method requires just an existing autoregressive model (no matter the size) and mathematically guarantees the output quality. In the next Section we describe the method.
|
| 48 |
+
|
| 49 |
+
# 3 Method
|
| 50 |
+
|
| 51 |
+
In this Section, we introduce notations, develop the theory behind Parallel Decoding, present three algorithms (Fig. 2), and discuss the initialization and stopping conditions for the proposed approaches.
|
| 52 |
+
|
| 53 |
+
# 3.1 Notation
|
| 54 |
+
|
| 55 |
+
The goal of MT is to translate a sentence $\mathbf{x}$ in a source language (e.g., Italian) with its translation $\mathbf{y}$ in the target language (e.g., English). Source and target sentences are generally tokenized in words or subwords (Kudo and Richardson, 2018; Schuster and Nakajima, 2012; Sennrich et al., 2016; Kudo, 2018); here, we use the suffix notation $\mathbf{x} = (x_{1},\ldots ,x_{n})$ and $\mathbf{y} = (y_{1},\ldots ,y_{m})$ to indicate specific tokens in the sequence. We also use the notation $\mathbf{x}_{1:n}$ to indicate a slice of a sequence as a shorthand of $\mathbf{x} = (x_{1},\dots,x_{n})$ . From a probabilistic perspective, an MT model estimates $p_{\theta}(\mathbf{y}\mid \mathbf{x})$ . Once an MT model has been trained, the inference phase is traditionally performed by sampling tokens from the model probability conditioned on the input sequence $\mathbf{x}$ and previously generated tokens $(y_{1},\ldots ,y_{i - 1})$ :
|
| 56 |
+
|
| 57 |
+
$$
|
| 58 |
+
p _ {\theta} \left(y _ {i} \mid y _ {1}, \dots , y _ {i - 1}, \mathbf {x}\right). \tag {1}
|
| 59 |
+
$$
|
| 60 |
+
|
| 61 |
+
Different sampling strategies are employed (e.g., Greedy, Top-K, Top-p (Kool et al., 2020; Holtzman
|
| 62 |
+
|
| 63 |
+
et al., 2020)) alongside search strategies that estimate the total conditional probability (e.g., Greedy search, Beam search (Reddy, 1977)). The most straightforward strategy, Greedy Search, selects the element $y_{i}$ of a sequence with:
|
| 64 |
+
|
| 65 |
+
$$
|
| 66 |
+
y _ {i} = \arg \max p _ {\theta} \left(y _ {i} \mid \mathbf {y} _ {1: i - 1}, \mathbf {x}\right). \tag {2}
|
| 67 |
+
$$
|
| 68 |
+
|
| 69 |
+
Given the formalization above, a standard autoregressive setting runs $m$ inference steps sequentially to generate an output sequence of $m$ elements.
|
| 70 |
+
|
| 71 |
+
Parallel Decoding. Given Equation (2), it is possible to write the greedy decoding procedure on all tokens as:
|
| 72 |
+
|
| 73 |
+
$$
|
| 74 |
+
\left\{ \begin{array}{l} y _ {1} = \arg \max p _ {\theta} \left(y _ {1} \mid \mathbf {x}\right) \\ y _ {2} = \arg \max p _ {\theta} \left(y _ {2} \mid y _ {1}, \mathbf {x}\right) \\ \vdots \\ y _ {m} = \arg \max p _ {\theta} \left(y _ {m} \mid \mathbf {y} _ {1: m - 1}, \mathbf {x}\right) \end{array} \right. \tag {3}
|
| 75 |
+
$$
|
| 76 |
+
|
| 77 |
+
Defining $f(y_{i},\mathbf{y}_{1:i - 1},\mathbf{x}) = y_{i} - \arg \max p_{\theta}(y_{i}\mid \mathbf{y}_{1:i - 1},\mathbf{x})$ , we can rewrite the system of Equations (3) as:
|
| 78 |
+
|
| 79 |
+
$$
|
| 80 |
+
\left\{ \begin{array}{l} f \left(y _ {1}, \mathbf {x}\right) = 0 \\ f \left(y _ {2}, y _ {1}, \mathbf {x}\right) = 0 \\ \vdots \\ f \left(y _ {m}, \mathbf {y} _ {1: m - 1}, \mathbf {x}\right) = 0 \end{array} \right. \tag {4}
|
| 81 |
+
$$
|
| 82 |
+
|
| 83 |
+
This system has $m$ non-linear equations (each equation employs a neural network) with $m$ variables.
|
| 84 |
+
|
| 85 |
+
# 3.2 Parallel Decoding Algorithms
|
| 86 |
+
|
| 87 |
+
The autoregressive decoding implicitly solves the system of Equations (4) by substitution, i.e., given the [BOS] token and the input sentence $x$ , it solves equations from first to last, progressively replacing the resolved variables. In this paper, we rely on Jacobi and Gauss-Seidel (GS) fixed-point iteration methods (Ortega and Rheinboldt, 1970) to solve in parallel system (4) until a stopping condition is reached. This formulation is particularly flexible and has several advantages: Firstly, it is completely agnostic to the underlying MT model used; Secondly, it can be analyzed with analytical tools and
|
| 88 |
+
|
| 89 |
+
has guarantees of convergence to the exact solution for system (4); Thirdly, it can be potentially extended by drawing from the numerical methods literature for solving non-linear equations (Saad, 2003). We see that, with the proper stopping condition, it is possible to have quality guarantees over the output. We present here three algorithms (PJ, PGJ, HGJ) that leverage these fixed-point iteration methods to speed up decoding in MT.
|
| 90 |
+
|
| 91 |
+
Parallel Jacobi (PJ) Decoding. First, we propose Algorithm 1. This algorithm works by initializing a draft translation for the whole target sentence and then iteratively translating the whole sentence in parallel until the stopping condition is triggered. This is equivalent to solving system (4) with Jacobi, hence the name of the method.
|
| 92 |
+
|
| 93 |
+
Parallel GS-Jacobi (PGJ) Decoding. Decoding the whole target sentence in parallel may introduce difficulties in inferring long dependencies between tokens since the underlying model is trained to model the conditional distribution of a token given the previous tokens. In general, we observed that shorter dependencies are easily predicted since decoding happens at the sub-word level, and the model can decode sub-word unities in parallel rather than the whole sentence. To this end, we propose Algorithm 2, called GS-Jacobi, that splits the sentence into contiguous $b$ -dimensional blocks. Starting from the first one, it decodes in parallel all its elements. Once a block is finished or the stopping condition within the block is triggered, the algorithm performs a sequential (Gauss-Seidel) step and proceeds with (Jacobi) decoding on the next one.
|
| 94 |
+
|
| 95 |
+
Hybrid GS-Jacobi (HGJ) Decoding. Algorithms 1 and 2 assume to know beforehand the number of equations $m$ (i.e., the target length). This is not usually the case for MT, where the model dynamically controls the length through the emission of a special end-of-sentence token [EOS]. To overcome this issue, we propose a flexible Hybrid Algorithm 3 that mixes PGJ computations with standard autoregressive decoding. This algorithm performs parallel GS-Jacobi decoding up to a certain prefixed length $h$ . If the [EOS] token is generated within a block, then the algorithm stops, returning the translation up to [EOS]. Otherwise, the algorithm concludes the translation by reaching the [EOS] token with standard autoregressive decoding. In this case, the length $h$ regulates the trade-off between
|
| 96 |
+
|
| 97 |
+
Algorithm 1 Parallel Jacobi Decoding
|
| 98 |
+
Input: $\mathbf{x} = (x_{1},\dots ,x_{n})$ $p_\theta$
|
| 99 |
+
Output: $\mathbf{y} = (y_1,\ldots ,y_m)$
|
| 100 |
+
1: $\mathbf{y}\gets \mathrm{INIT}(\mathbf{x})$
|
| 101 |
+
2: $m\gets \text{len} (\mathbf{y})$
|
| 102 |
+
3: for $i = 1$ to m do
|
| 103 |
+
4: $\mathbf{o}\gets \mathrm{copy}(\mathbf{y}_{1:m})$
|
| 104 |
+
5: $\mathbf{y}_{1:m}\gets \arg \max (p_{\theta}(\mathbf{y}_{1:m}|\mathbf{y}_{1:m},\mathbf{x}))$
|
| 105 |
+
6: stop $\leftarrow$ STOPC(o,y1:m)
|
| 106 |
+
7: if stop then
|
| 107 |
+
8: break
|
| 108 |
+
9: end if
|
| 109 |
+
10: end for
|
| 110 |
+
11: return y
|
| 111 |
+
|
| 112 |
+
parallel and sequential computation, limiting the waste of resources beyond [EOS].
|
| 113 |
+
|
| 114 |
+
# 3.3 Initialization and Stopping
|
| 115 |
+
|
| 116 |
+
Our algorithms share two components: the initialization procedure and the stopping condition.
|
| 117 |
+
|
| 118 |
+
Initialization INIT(x). The initialization procedure is a function that inputs the source sentence and produces an initial draft translation as output. In this paper we experimented with a simple initialization procedure that initializes the translation with all [PAD] tokens. This choice is fast and doesn't depend on the underlying MT model. We leave as future work the research of different initialization procedures to further speed up the decoding.
|
| 119 |
+
|
| 120 |
+
Stopping Condition STOPC $(\mathbf{y}^{k - 1},\mathbf{y}^k)$ . The stopping condition is a function that takes as input the previous-iteration sentence $\mathbf{y}^{k - 1}$ and the current-iteration sentence $\mathbf{y}^k$ and decides whether to stop the algorithm or not. This function is crucial since it regulates the trade-off between speedup and translation quality. In this paper we introduce as stopping condition for MT:
|
| 121 |
+
|
| 122 |
+
$$
|
| 123 |
+
\mathbf {y} ^ {k - 1} - \mathbf {y} ^ {k} = \mathbf {0} \tag {5}
|
| 124 |
+
$$
|
| 125 |
+
|
| 126 |
+
i.e., the sentence from the previous step has not changed. This stop condition allows for preserving quality and quickening translations simultaneously.
|
| 127 |
+
|
| 128 |
+
# 3.4 Quality Guarantees
|
| 129 |
+
|
| 130 |
+
Compared to NAT methods which do not have any quality guarantee since a novel parallel model is trained from scratch, our formulation guarantees the same quality as using autoregressive decoding with the same MT model. System (4) is known in the literature as a triangular system of $m$ equations with $m$ variables; this characterization allows us to state an important property.
|
| 131 |
+
|
| 132 |
+
<table><tr><td rowspan="2">Decoding Algorithm</td><td colspan="2">en→de</td><td colspan="2">de→en</td><td colspan="2">en→ro</td><td colspan="2">ro→en</td></tr><tr><td>Speed</td><td>BLEU</td><td>Speed</td><td>BLEU</td><td>Speed</td><td>BLEU</td><td>Speed</td><td>BLEU</td></tr><tr><td>Opus</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Greedy Autoregressive</td><td>1.00×</td><td>28.24</td><td>1.00×</td><td>33.10</td><td>1.00×</td><td>27.41</td><td>1.00×</td><td>37.01</td></tr><tr><td>Beam Search (beam = 5)</td><td>0.71×</td><td>28.68</td><td>0.72×</td><td>33.92</td><td>0.70×</td><td>27.61</td><td>0.72×</td><td>37.84</td></tr><tr><td>PJ Decoding</td><td>0.73×</td><td>28.24</td><td>0.75×</td><td>33.10</td><td>0.66×</td><td>27.41</td><td>0.66×</td><td>37.01</td></tr><tr><td>PGJ Decoding (b = 5)</td><td>1.28×</td><td>28.24</td><td>1.32×</td><td>33.10</td><td>1.33×</td><td>27.41</td><td>1.29×</td><td>37.01</td></tr><tr><td>PGJ Decoding (b = 3)</td><td>1.34×</td><td>28.24</td><td>1.37×</td><td>33.10</td><td>1.38×</td><td>27.41</td><td>1.35×</td><td>37.01</td></tr><tr><td>HGJ Decoding (b = 3)</td><td>1.34×</td><td>28.24</td><td>1.37×</td><td>33.10</td><td>1.38×</td><td>27.41</td><td>1.35×</td><td>37.01</td></tr><tr><td>MBart50</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Greedy Autoregressive</td><td>1.00×</td><td>23.97</td><td>1.00×</td><td>31.58</td><td>1.00×</td><td>24.99</td><td>1.00×</td><td>34.77</td></tr><tr><td>Beam Search (beam = 5)</td><td>0.76×</td><td>24.93</td><td>0.77×</td><td>32.61</td><td>0.77×</td><td>25.31</td><td>0.76×</td><td>35.16</td></tr><tr><td>PJ Decoding</td><td>0.88×</td><td>23.97</td><td>0.88×</td><td>31.58</td><td>0.86×</td><td>24.99</td><td>0.85×</td><td>34.77</td></tr><tr><td>PGJ Decoding (b = 5)</td><td>0.98×</td><td>23.97</td><td>0.98×</td><td>31.58</td><td>0.97×</td><td>24.99</td><td>0.99×</td><td>34.77</td></tr><tr><td>PGJ Decoding (b = 
3)</td><td>1.06×</td><td>23.97</td><td>1.08×</td><td>31.58</td><td>1.03×</td><td>24.99</td><td>1.04×</td><td>34.77</td></tr><tr><td>HGJ Decoding (b = 3)</td><td>1.05×</td><td>23.97</td><td>1.07×</td><td>31.58</td><td>1.01×</td><td>24.99</td><td>1.02×</td><td>34.77</td></tr></table>
|
| 133 |
+
|
| 134 |
+
Table 1: Comparison of parallel decoding algorithms (highlighted in grey) with sequential decoding using Opus (CPU) and MBart50 (GPU) on WMT14 and WMT16. Speed is measured in time w.r.t. the autoregressive baseline.
|
| 135 |
+
|
| 136 |
+
<table><tr><td rowspan="2">Dec. Algorithm</td><td rowspan="2">Speed</td><td colspan="2">WMT17
|
| 137 |
+
En-Fi</td><td colspan="2">IITB
|
| 138 |
+
En-Hi</td><td colspan="2">IWSLT15
|
| 139 |
+
En-Vi</td><td colspan="2">FLORES
|
| 140 |
+
En-It</td><td colspan="2">En-Fr</td></tr><tr><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td></tr><tr><td rowspan="2">PJ</td><td>Iters</td><td>1.04×</td><td>1.04×</td><td>1.04×</td><td>1.04×</td><td>1.06×</td><td>1.03×</td><td>1.02×</td><td>1.04×</td><td>1.03×</td><td>1.03×</td></tr><tr><td>Time</td><td>0.86×</td><td>0.88×</td><td>0.89×</td><td>0.89×</td><td>0.87×</td><td>0.86×</td><td>0.85×</td><td>0.86×</td><td>0.85×</td><td>0.85×</td></tr><tr><td rowspan="2">PGJ (b=3)</td><td>Iters</td><td>1.07×</td><td>1.09×</td><td>1.09×</td><td>1.09×</td><td>1.10×</td><td>1.07×</td><td>1.07×</td><td>1.08×</td><td>1.08×</td><td>1.11×</td></tr><tr><td>Time</td><td>1.01×</td><td>1.05×</td><td>1.05×</td><td>1.07×</td><td>1.04×</td><td>1.02×</td><td>1.02×</td><td>1.03×</td><td>1.03×</td><td>1.05×</td></tr><tr><td rowspan="2">HGJ (b=3)</td><td>Iters</td><td>1.05×</td><td>1.07×</td><td>1.07×</td><td>1.07×</td><td>1.07×</td><td>1.06×</td><td>1.07×</td><td>1.06×</td><td>1.05×</td><td>1.07×</td></tr><tr><td>Time</td><td>1.01×</td><td>1.03×</td><td>1.04×</td><td>1.05×</td><td>1.03×</td><td>1.01×</td><td>1.01×</td><td>1.02×</td><td>1.01×</td><td>1.03×</td></tr></table>
|
| 141 |
+
|
| 142 |
+
Table 2: Comparison over different languages in terms of speedup and iterations on MBart50. Arrows indicate the direction of translation. Qualitative results and BLEU scores are available in Appendix D.
|
| 143 |
+
|
| 144 |
+
Proposition 1. Algorithms 1, 2, 3 converge and yield the same results as greedy autoregressive decoding in at most $m$ parallel iterations, for any initialization, provided stopping condition (5) is used.
|
| 145 |
+
|
| 146 |
+
We refer the reader to Song et al. (2021b) for a formal proof. Intuitively, with $m$ steps the algorithm uses the same number of iterations as autoregressive decoding, hence the final solution is the same regardless of the initialization. In this worst case, the wall-clock time is the same, but in general the algorithm reaches the stopping condition earlier, with a lower wall-clock time and an overall speedup.
|
| 147 |
+
|
| 148 |
+
# 3.5 DDGviz
|
| 149 |
+
|
| 150 |
+
Equation 1 models the dependency between tokens in the decoding phase. In the classical autoregressive mode, each token depends on all the previous ones for the generation. However, it is possible to show that this dependency is actually relaxed (i.e., not all tokens depend on all the previous ones), thus it would be interesting to visualize the actual distribution $p_{\theta}(y_i \mid \cdot, \mathbf{x})$ learned by an existing MT model. To this end, we build the Decoding Dependency Graph visualizer (DDGviz) to visualize the dependency graph of tokens in the decoding phase. In the standard autoregressive decoding this graph is a fully-connected chain where the $i$ -th token is
|
| 151 |
+
|
| 152 |
+
connected to all the previous tokens, starting from the encoding $\mathbf{x}$ : to decode $y_{i}$ you need to decode first $y_{1},\ldots ,y_{i - 1}$ . Instead, we show that there are skipping connections between independent tokens that can be visualized with DDGviz. We detail DDGviz with an example in Section 4.3.
|
| 153 |
+
|
| 154 |
+
# 4 Experiments
|
| 155 |
+
|
| 156 |
+
# 4.1 Experimental Settings
|
| 157 |
+
|
| 158 |
+
Datasets. We evaluate our approach using standard evaluation datasets proposed for parallel MT (Gu et al., 2018): WMT14 English-German [En-De], WMT16 English-Romanian [En-Ro] (Bojar et al., 2014, 2016). Additionally, we tested our method on different language pairs with varying (low-medium) resources: IWSLT15 (English-Vietnamese [En-Vi]) (Tran et al., 2015), IITB (English-Hindi [En-Hi]) (Kunchukuttan et al., 2018), WMT17 (English-Finnish [En-Fi]) (Bojar et al., 2017), FLORES-101 (English-Italian [En-It]; English-French [En-Fr]) (Goyal et al., 2022). All the datasets are evaluated in both directions.
|
| 159 |
+
|
| 160 |
+
Evaluation. All the evaluations are performed using the official test split for each dataset, downloaded using Huggingface dataset library (Lhoest et al., 2021). No training or hyperparameters tun
|
| 161 |
+
|
| 162 |
+
<table><tr><td rowspan="2">Method</td><td colspan="3">Requirements</td><td colspan="2">WMT14</td><td colspan="2">Efficiency</td></tr><tr><td>Arch</td><td>Loss</td><td>seq-KD</td><td>Speed ↑</td><td>BLEU ↑</td><td>Train FLOPs ↓</td><td>Total FLOPs ↓</td></tr><tr><td>Parallel Decoding - HGJ (Ours)</td><td>No</td><td>No</td><td>No</td><td>1.34×</td><td>28.24</td><td>0</td><td>2.53e+13</td></tr><tr><td>SUNDAE†(Savinov et al., 2022)</td><td>Yes</td><td>No</td><td>No</td><td>1.4×</td><td>28.46</td><td>5.27e+21</td><td>5.27e+21</td></tr><tr><td>ShallowDec (12-1) (Kasai et al., 2021)</td><td>Yes</td><td>No</td><td>No</td><td>1.4×</td><td>26.90</td><td>1.02e+19</td><td>1.02e+19</td></tr><tr><td>Semi-NAT (Wang et al., 2018)</td><td>Yes</td><td>No</td><td>Yes</td><td>1.5×</td><td>26.90</td><td>1.55e+17</td><td>1.55e+17</td></tr><tr><td>DisCo (Kasai et al., 2020)</td><td>Yes</td><td>Yes</td><td>Yes, Big</td><td>3.5×</td><td>27.34</td><td>4.06e+19</td><td>4.06e+19</td></tr><tr><td>DSLP (Huang et al., 2021)</td><td>Yes</td><td>Yes</td><td>Yes</td><td>14.8×</td><td>27.02</td><td>1.93e+19</td><td>1.93e+19</td></tr><tr><td>F-VAE (Gu and Kong, 2021)</td><td>Yes</td><td>Yes</td><td>Yes, Big</td><td>16.5×</td><td>27.49</td><td>4.06e+19</td><td>4.06e+19</td></tr></table>
|
| 163 |
+
|
| 164 |
+
Table 3: Comparison of different methods for parallel MT on WMT14 En-De. Results are ordered by speed, highlighted in green the two highest BLEU scores, $\dagger$ indicates diffusion models. Existing methods require training, architecture modifications, additional losses to force parallel translation, and distillation from an additional MT transformer model ("Big" indicates the size). Details on FLOPs computation are available in Appendix C.
|
| 165 |
+
|
| 166 |
+
ing is performed. We use SacreBLEU to evaluate the translation quality (Papineni et al., 2002; Post, 2018). We measure speedup in wall-clock time and iterations w.r.t. the same autoregressive model. GPU times are calculated after calling torch.cuda.synchronize(). All the experiments were performed by caching the past Keys and Values of the transformer to further speed up the computation (Ramachandran et al., 2017) and in the online inference setting with batch size equal to 1. For the Jacobi and GS-Jacobi algorithms, we assume to know beforehand the length $m$ of the target and measure the speedup in the ideal condition. For the Hybrid GS-Jacobi algorithm, we set $h$ equal to the maximum (i.e., the stopping condition is triggered within a parallel block) to decouple the effective speedup regardless of the length produced by the initialization function (see Section 3.2). We remark that HGJ does not assume to know beforehand the target length and is applicable to real MT translation scenarios.
|
| 167 |
+
|
| 168 |
+
Model Configuration. We tested transformer models in the two standard configurations: base (512 model dimension, 6 attention layers for both encoder and decoder) and big (1024 model dimension, 12 attention layers for both encoder and decoder). We used pretrained models of Opus (Tiedemann and Thottingal, 2020) for the former and MBart50 (Tang et al., 2020) for the latter. Opus is a transformer base model (74M parameters) trained on language pairs from the homonymous dataset (Zhang et al., 2020). MBart50 is a large multilingual transformer model fine-tuned for translation on 50 languages (610M parameters). We tested the models on CPU since this is the default environment for MT models in production, except for the model MBart50 which runs on GPU. We run the experiments on a standard 16-core machine, except for the scaling experiments. Additional specifications are available in Appendix B.
|
| 169 |
+
|
| 170 |
+
# 4.2 Algorithms Comparison
|
| 171 |
+
|
| 172 |
+
In Table 1 we compare the proposed parallel decoding algorithms with the standard sequential autoregressive decoding baselines. As we can observe, the fastest algorithms are PGJ Decoding $(\mathrm{b} = 3)$ and HGJ Decoding $(\mathrm{b} = 3)$ which are up to $34\%$ and $38\%$ faster on Opus and up to $5\%$ and $8\%$ faster on MBart50, depending on the language pair. We note also that results empirically show that all the parallel decoding algorithms guarantee the same quality of greedy autoregressive decoding, as evidenced by the unchanged BLEU scores. This is an experimental verification of the formal Proposition 1. The table also shows that the Beam Search algorithm with a beam size of 5 generally performs better in terms of BLEU score, although at a cost of speed. This difference in terms of BLEU is expected, as beam search is a heuristic search strategy, while our method is a decoding algorithm. We discuss this aspect further in the "Beam Search" paragraph. Nevertheless, beam search is $\sim 30\%$ slower than greedy autoregressive and $63\%$ to $68\%$ slower than PGJ, depending on the model and language pair. This means that the proposed parallel algorithms allow trading a little translation quality (e.g., on en→ro the difference between beam search and parallel decoding algorithms in BLEU is just 0.20 points) for greater decoding speed.
|
| 173 |
+
|
| 174 |
+
Another aspect to note is that the algorithms PJ and PGJ (b=5) are sometimes slower than greedy autoregressive. There are several factors that can influence the actual wall-clock time, such as how the underlying hardware schedules and executes the various operations, which might vary according to the architecture and the workload. In particular, longer sequences (e.g., the whole sentence in PJ or blocks of 5 tokens in PGJ) may require more memory to store, and the CPU/GPU may have to perform more memory accesses, which can slow down the computation (although theoretically it should happen in parallel). In the end, these computational
|
| 175 |
+
|
| 176 |
+
overheads slow down the actual execution. This is also the case for the difference in speedups between MBart50 and Opus. We investigate this aspect further in the "Computational Scaling" section and report in the appendix results on a different architecture, together with results in terms of iteration speedups, which are architecture-agnostic.
|
| 177 |
+
|
| 178 |
+
# 4.3 Analysis and Validation
|
| 179 |
+
|
| 180 |
+
Cross Languages. In order to demonstrate the robustness of our decoding algorithms with respect to the translation languages, we leveraged the multilingual capabilities of the MBart50 model and selected a diverse range of language pairs for evaluation. The results, presented in Table 2, show that both PGJ and HGJ achieve a consistent speedup in comparison to the autoregressive decoding method, with an improvement ranging from $2 - 7\%$ for PGJ and $1 - 5\%$ for HGJ, regardless of the language pair used. Additionally, we observed a speedup in terms of iterations of $7 - 11\%$ for PGJ and $5 - 7\%$ for HGJ. These findings indicate that our algorithms have the potential to match or surpass the speedup in terms of wall-clock time by fully exploiting this saving in terms of iterations. We note that, similar to the previous experiment, PJ suffers from an overhead problem. To the best of our knowledge, this is one of the first studies to achieve a speedup in multilingual machine translation, concurrent with the work of Song et al. (2022), although the latter is significantly different in spirit and requirements (NAT model). Due to space constraints, we report BLEU scores in Appendix D, together with qualitative results in different languages.
|
| 181 |
+
|
| 182 |
+
Computational Scaling. In Figure 3, we present an analysis of the scalability of our proposed methods in relation to increasing computational resources. Starting with 8 cores, our methods demonstrate a slight improvement in terms of wall-clock time for PGJ and HGJ, with speedups of 1.11 and 1.09 respectively. On the other hand, this amount of resources is too restricting for PJ which needs to fit the whole sentence and thus achieve a score of 0.46 due to the aforementioned overhead problem. As the resources are increased, our method demonstrates the ability to effectively leverage hardware and significantly reduce decoding time, while the autoregressive baseline is constrained by sequential processing. With 122 cores, a substantial speedup of $1.98 \times$ and $1.99 \times$ is achieved for PGJ and HGJ respectively, while the autoregressive baseline is
|
| 183 |
+
|
| 184 |
+

|
| 185 |
+
Figure 3: Scaling experiments on WMT16 En-De with PGJ and HGJ blocks $= 3$ . Increasing the number of available resources (number of CPU cores) allows the methods to decrease the parallel overheads. As a result, the speedup increases and the methods scale.
|
| 186 |
+
|
| 187 |
+
bounded by sequential processing at $1.00 \times$ . It is important to note that this experiment does not simulate a real production system, but rather it is meant to show what results can be achieved when the underlying computation is properly optimized to run in parallel. In our case, we simulated this setting with increasing cores, nevertheless similar results can be achieved with additional software optimizations to further reduce latency and overheads (Ahmed et al., 2022; Kim et al., 2019) and increase the speed gain with parallel-optimized computations. Overall this experiment serves as a proof of concept for the capabilities of parallel decoding in contexts with limited overhead and shows a promising direction for further improvements.
|
| 188 |
+
|
| 189 |
+
Comparison with NATs. Table 3 reports the comparison of our parallel decoding algorithm with a selection of NAT methods for parallel MT. Following prior works, we report for each method the speedup relative to the autoregressive transformer base baseline from their original paper (Xiao et al., 2022). It is worth noting that, although these methods can achieve higher speedups, they are very demanding in terms of computational resources which must be accounted for in a fair comparison. To estimate quantitatively this cost, we evaluated the number of floating point operations (FLOPs) required for training and inference on WMT14.
|
| 190 |
+
|
| 191 |
+
Results show that our method HGJ uses the least number of computational resources, even considering the additional cost at inference time. Relating the speedup obtained with the used resources (FLOPs/speed), our method still achieves the best
|
| 192 |
+
|
| 193 |
+
cost-benefit ratio. Furthermore, NATs generally degrade the translation quality if compared to their autoregressive baseline. On the contrary, our method mathematically guarantees the same quality of autoregressive decoding, which is higher than standard NAT models.
|
| 194 |
+
|
| 195 |
+
SUNDAE achieves BLEU of 28.46, but requires more resources than training RoBERTa (Liu et al., 2019) on 16 TPUs (see Appendix C). Other methods require further elaborate techniques like profound architectural changes, additional losses to force parallel translation and sequence-level distillation from large autoregressive transformers (Gu and Kong, 2021). Our approach is a decoding method that does not involve any training or modification to the model and can be used to speed up existing models on standard desktop hardware.
|
| 196 |
+
|
| 197 |
+
Speedup Analysis. We provide here a preliminary analysis of the factors responsible for the observed speedup in our method. We first distinguish between two types of speedup: wall-clock speedup and iterations speedup. The former is primarily driven by the parallelization capability of our method, as demonstrated in the "Computational Scaling" section. With parallel decoding, underlying operations can be optimized and fused to be executed quickly. Compared to Sheng et al. (2023), our method allows parallelizing sequence operations ("row-by-row" setting). The latter instead may vary as a consequence of several factors (e.g., model/vocabulary size, training data, language, etc.). For this reason, we experimented with several variations of these factors (models Transformer Base vs. Big, vocabularies 58K Marian vs. 250K MBart50, languages, and hardware). While it is challenging to decouple different elements, our analysis points out several interesting insights. For example, we observed that iteration results on MBart50 are generally higher compared to Marian (Tables 2-6), possibly due to the finer-grained tokenization of MBart50. We also hypothesize that language and linguistic features, such as inflectionally rich or agglutinative/gendered languages, may influence iteration speedups. To facilitate this type of analysis, we developed DDGviz, which we believe will be useful for research in this area.
|
| 198 |
+
|
| 199 |
+
Visualizing Parallel Decoding. In previous experiments, we demonstrated that parallel decoding is feasible. This suggests that the dependency learned by the model between certain tokens is relaxed, as some tokens can be decoded in parallel.
|
| 200 |
+
|
| 201 |
+

|
| 202 |
+
Figure 4: DDGviz. Visualization of the translation EnRo: "How satisfied are the Romanian couples: men versus women" $\rightarrow$ "Cât de satisfacuti sunt cuplurile romanești: bărbatai impotriva femeilor". (Highlighted tokens decoded in parallel). On top: the Decoding Dependency Graph, omitting redundant edges on non-parallel tokens to ease visualization. On bottom: DDGviz shows at each Parallel Jacobi iteration (vertical axis) which tokens have been generated in parallel (horizontal axis) with the corresponding probability (cell number).
|
| 203 |
+
|
| 204 |
+
Analyzing and understanding when this happens allows shedding light on the behavior of existing models, and a separate study focused on this issue would be needed. In this work, we lay the ground for such a study by introducing the necessary inspection tools. While we have already introduced DDGviz in Section 3.5, in this experiment we show how it works and how it can be used with a practical example. In summary, the DDGviz visualizer shows the real decoding distribution $p_{\theta}(y_i \mid \cdot, \mathbf{x})$ learned by a MT model. This decoding distribution is plotted as a graph, where a connection indicates the dependency $p_{\theta}(y_i \mid \cdot)$ , by using Parallel Jacobi decoding. At each PJ decoding iteration (vertical axis of Figure 4), DDGviz keeps track of which tokens have been correctly decoded w.r.t. the gold autoregressive reference of the model, showing the tokens correctly decoded and the probability of each one (horizontal axis). Figure 4 shows DDGviz applied on an example. The example shows that for $y_4 = \_sa$ it is possible to decode more than one token in parallel $y_5 = tis$ , $y_6 = fa$ ; hence here the decoding of $y_6$ does not depend on the decoding of $y_5$ , i.e., $p_{\theta}(y_6 \mid \mathbf{y}_{1:4}, \mathbf{x})$ . We observed this phenomenon frequently, explaining the speedups in the previous experiments. The
|
| 205 |
+
|
| 206 |
+
example also shows that the model is able to decode five tokens in parallel after $y_7 = \_cu$ . This is a peculiar case since the model, given "How satisfi_", is generating all at once "_ed are the Romanian couples" (proposed here in English for better readability; the original version in Romanian is available in Figure 4). This example indeed shows how DDGviz can be used to highlight possible biases encoded in the model, as it is not clear how the model can be so confident (see cell probability) that after "satisfied" the most straightforward tokens to decode are "Romanian couples" (Chang et al., 2019; Savoldi et al., 2021). We leave other use cases for future works and show in Appendix D several visualizations with equally interesting phenomena.
|
| 207 |
+
|
| 208 |
+
# 5 Conclusions
|
| 209 |
+
|
| 210 |
+
In this paper, we showed that it is possible to speed up existing machine translation models by simply changing the decoding algorithm with a parallel formulation. We introduced three parallel decoding methods which achieve consistent speedups without requiring any training, modifications, or quality loss. Our solution is orthogonal to previous approaches proposed in the literature, which often entail demanding requirements in terms of data, computational resources, and engineering effort. Although our method is not without shortcomings, it is a first valuable step toward integrating parallel decoding algorithms into any model. This is particularly relevant in limited-resource scenarios where NATs are not a viable option and to speed up any transformer model, especially fine-grained or character-level models (Edman et al., 2023). We believe that further advancements in this area, including the exploration of optimal initialization procedures and stopping conditions, as well as the use of alternative parallel solvers for non-linear equations, will close the gap with learning-based techniques and continue to improve the efficiency and effectiveness of parallel decoding algorithms.
|
| 211 |
+
|
| 212 |
+
# Acknowledgements
|
| 213 |
+
|
| 214 |
+
We would like to thank Sébastien Bratières for his thorough feedback provided on this project. This work is supported by Translated with an Imminent Research Grant, ERC Starting Grant No. 802554 (SPECGEO), and PRIN 2020 project n.2020TA3K9N "LEGO.AI". Riccardo Marin is also supported by an Alexander von Humboldt Foundation Research Fellowship.
|
| 215 |
+
|
| 216 |
+
# Limitations
|
| 217 |
+
|
| 218 |
+
The proposed algorithms make it possible to speed up an existing model out-of-the-box, without any modification or retraining. However, there are some considerations to bear in mind when using parallel decoding in order to obtain a speedup in terms of wall-clock time. Firstly, as the name implies, the method executes the decoding phase in parallel. Therefore, to appreciate the speedup one should be able to run computations in parallel. Using parallel decoding without parallel resources or parallel-optimized software may increase wall-clock time due to overheads, leading to a waste of computation. This is further discussed in Section 4.3 "Computational Scaling". The reported wall-clock time results are thus to be considered within the scope of the experimental setup proposed in this paper, and they may vary depending on the underlying hardware and software. Secondly, the method speeds up decoding by scaling over parallel resources. This implies an additional computational cost during the inference phase to achieve a speedup. While using parallel decoding, one should consider a trade-off between the desired acceleration and the utilization of computational resources. Thirdly, since our method performs the decoding in parallel, as for NAT systems, it is difficult to combine it with Beam Search. Beam Search is inherently a dynamic programming algorithm and it is not possible to efficiently maximize the joint probability of the large search space without using sequential intermediate computations. We explain this aspect further in the next paragraph.
|
| 219 |
+
|
| 220 |
+
Beam Search. Beam search is widely employed to enhance the translation quality in MT (Sutskever et al., 2014; Bahdanau et al., 2015) as well as in other domains such as audio (Reddy, 1977; Postolache et al., 2023). However, it is an inherently sequential procedure that stores partial joint probabilities of the entire sequence (beams) while progressing with autoregressive decoding. Determining the maximal joint probability of all sequences in parallel is a challenging task, equivalent to a full maximum a posteriori (MAP) estimation. This is an open research problem and it is also an issue for NAT methods. NAT methods patch up this limitation with sequence-level KD which has the advantage of "not requiring any beam search at test-time" (Kim and Rush, 2016) thanks to learning and distillation from large models. Since our
|
| 221 |
+
|
| 222 |
+
method is a decoding algorithm, we cannot use the same approach without learning. Nevertheless, the quality guarantee allows our methods to have performance on par with greedy autoregressive and generally better than a NAT model. We think of our method, not as a replacement for beam search, but rather as a way to obtain a speedup at inference time that is a middle ground between autoregressive greedy decoding (high quality, no requirements, no speed) and NATs (quality compromises, increasing requirements with increasing speed). Future work might address the quality gap with beam search by combining parallel decoding with alternative techniques like Minimum Bayes Risk (Eikema and Aziz, 2020).
|
| 223 |
+
|
| 224 |
+
# Ethics Statement
|
| 225 |
+
|
| 226 |
+
Increasing the inference speed of MT can positively impact society by giving people a fast and good translation. This will enable people from different language backgrounds to communicate with each other and help remove cultural and trade barriers. As demonstrated by comparing the number of FLOPs in Table 3, our method uses fewer resources compared to alternatives and thus has a smaller carbon footprint, making it a more sustainable choice (Strubell et al., 2019). Furthermore, since our method does not involve training procedures or change the quality of results, we do not introduce any societal bias (e.g. racism, sexism, homophobia) into the translations. Such biases, however, can be introduced through the data used to train the backbone autoregressive models and NATs. It is the task of those who train these models to mitigate this problem. DDGviz can also help investigate and visualize some potential harmful biases encoded in the model, as in Figure 4.
|
| 227 |
+
|
| 228 |
+
# References
|
| 229 |
+
|
| 230 |
+
Ibrahim Ahmed, Sahil Parmar, Matthew Boyd, Michael Beidler, Kris Kang, Bill Liu, Kyle Roach, John Kim, and Dennis Abts. 2022. Answer fast: Accelerating bert on the tensor streaming processor. In 2022 IEEE 33rd International Conference on Application-specific Systems, Architectures and Processors (ASAP), pages 80-87. IEEE.
|
| 231 |
+
Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Bengio. 2015. Neural machine translation by jointly learning to align and translate. In 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings.
|
| 232 |
+
|
| 233 |
+
Ondrej Bojar, Christian Buck, Christian Federmann, Barry Haddow, Philipp Koehn, Johannes Leveling, Christof Monz, Pavel Pecina, Matt Post, Herve Saint-Amand, Radu Soricut, Lucia Specia, and Ales Tamchyna. 2014. Findings of the 2014 workshop on statistical machine translation. In Proceedings of the Ninth Workshop on Statistical Machine Translation, pages 12-58, Baltimore, Maryland, USA. Association for Computational Linguistics.
|
| 234 |
+
Ondrej Bojar, Rajen Chatterjee, Christian Federmann, Yvette Graham, Barry Haddow, Shujian Huang, Matthias Huck, Philipp Koehn, Qun Liu, Varvara Logacheva, Christof Monz, Matteo Negri, Matt Post, Raphael Rubino, Lucia Specia, and Marco Turchi. 2017. Findings of the 2017 conference on machine translation (wmt17). In Proceedings of the Second Conference on Machine Translation, Volume 2: Shared Task Papers, pages 169-214, Copenhagen, Denmark. Association for Computational Linguistics.
|
| 235 |
+
Ondrej Bojar, Rajen Chatterjee, Christian Federmann, Yvette Graham, Barry Haddow, Matthias Huck, Antonio Jimeno Yepes, Philipp Koehn, Varvara Logacheva, Christof Monz, Matteo Negri, Aurelie Neveol, Mariana Neves, Martin Popel, Matt Post, Raphael Rubino, Carolina Scarton, Lucia Specia, Marco Turchi, Karin Verspoor, and Marcos Zampieri. 2016. Findings of the 2016 conference on machine translation. In Proceedings of the First Conference on Machine Translation, pages 131-198, Berlin, Germany. Association for Computational Linguistics.
|
| 236 |
+
Kai-Wei Chang, Vinodkumar Prabhakaran, and Vicente Ordonez. 2019. Bias and fairness in natural language processing. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP): Tutorial Abstracts, Hong Kong, China. Association for Computational Linguistics.
|
| 237 |
+
Charlie Chen, Sebastian Borgeaud, Geoffrey Irving, Jean-Baptiste Lespiau, Laurent Sifre, and John Jumper. 2023. Accelerating large language model decoding with speculative sampling.
|
| 238 |
+
Cunxiao Du, Zhaopeng Tu, and Jing Jiang. 2021. Order-agnostic cross entropy for non-autoregressive machine translation. In International Conference on Machine Learning, pages 2849-2859. PMLR.
|
| 239 |
+
Lukas Edman, Gabriele Sarti, Antonio Toral, Gertjan van Noord, and Arianna Bisazza. 2023. Are character-level translations worth the wait? comparing character- and subword-level models for machine translation.
|
| 240 |
+
Sergey Edunov, Myle Ott, Michael Auli, and David Grangier. 2018. Understanding back-translation at scale. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 489-500, Brussels, Belgium. Association for Computational Linguistics.
|
| 241 |
+
|
| 242 |
+
Bryan Eikema and Wilker Aziz. 2020. Is MAP decoding all you need? the inadequacy of the mode in neural machine translation. In Proceedings of the 28th International Conference on Computational Linguistics, pages 4506-4520, Barcelona, Spain (Online). International Committee on Computational Linguistics.
|
| 243 |
+
Xinwei Geng, Xiaocheng Feng, and Bing Qin. 2021. Learning to rewrite for non-autoregressive neural machine translation. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 3297-3308, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
|
| 244 |
+
Marjan Ghazvininejad, Vladimir Karpukhin, Luke Zettlemoyer, and Omer Levy. 2020a. Aligned cross entropy for non-autoregressive machine translation. In Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pages 3515-3523. PMLR.
|
| 245 |
+
Marjan Ghazvininejad, Omer Levy, Yinhan Liu, and Luke Zettlemoyer. 2019. Mask-predict: Parallel decoding of conditional masked language models. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 6112-6121, Hong Kong, China. Association for Computational Linguistics.
|
| 246 |
+
Marjan Ghazvininejad, Omer Levy, and Luke Zettlemoyer. 2020b. Semi-autoregressive training improves mask-predict decoding. arXiv preprint arXiv:2001.08785.
|
| 247 |
+
Naman Goyal, Cynthia Gao, Vishrav Chaudhary, Peng-Jen Chen, Guillaume Wenzek, Da Ju, Sanjana Krishnan, Marc'Aurelio Ranzato, Francisco Guzmán, and Angela Fan. 2022. The Flores-101 evaluation benchmark for low-resource and multilingual machine translation. Transactions of the Association for Computational Linguistics, 10:522-538.
|
| 248 |
+
Jiatao Gu, James Bradbury, Caiming Xiong, Victor O.K. Li, and Richard Socher. 2018. Non-autoregressive neural machine translation. In International Conference on Learning Representations.
|
| 249 |
+
Jiatao Gu and Xiang Kong. 2021. Fully non-autoregressive neural machine translation: Tricks of the trade. In *Findings of the Association for Computational Linguistics: ACL-IJCNLP* 2021, pages 120-133, Online. Association for Computational Linguistics.
|
| 250 |
+
Yongchang Hao, Shilin He, Wenxiang Jiao, Zhaopeng Tu, Michael Lyu, and Xing Wang. 2021. Multi-task learning with shared encoder for non-autoregressive machine translation. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 3989-3996, Online. Association for Computational Linguistics.
|
| 251 |
+
|
| 252 |
+
Ari Holtzman, Jan Buys, Li Du, Maxwell Forbes, and Yejin Choi. 2020. The curious case of neural text degeneration. In International Conference on Learning Representations.
|
| 253 |
+
Chenyang Huang, Hao Zhou, Osmar R. Zaiane, Lili Mou, and Lei Li. 2021. Non-autoregressive translation with layer-wise prediction and deep supervision. CoRR, abs/2110.07515.
|
| 254 |
+
Xiao Shi Huang, Felipe Perez, and Maksims Volkovs. 2022. Improving non-autoregressive translation models without distillation. In International Conference on Learning Representations.
|
| 255 |
+
Jungo Kasai, James Cross, Marjan Ghazvininejad, and Jiatao Gu. 2020. Non-autoregressive machine translation with disentangled context transformer. In Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pages 5144-5155. PMLR.
|
| 256 |
+
Jungo Kasai, Nikolaos Pappas, Hao Peng, James Cross, and Noah Smith. 2021. Deep encoder, shallow decoder: Reevaluating non-autoregressive machine translation. In International Conference on Learning Representations.
|
| 257 |
+
Sehoon Kim, Karttikeya Mangalam, Jitendra Malik, Michael W. Mahoney, Amir Gholami, and Kurt Keutzer. 2023. Big little transformer decoder.
|
| 258 |
+
Yoon Kim and Alexander M. Rush. 2016. Sequence-level knowledge distillation. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 1317-1327, Austin, Texas. Association for Computational Linguistics.
|
| 259 |
+
Young Jin Kim, Marcin Junczys-Dowmunt, Hany Hassan, Alham Fikri Aji, Kenneth Heafield, Roman Grundkiewicz, and Nikolay Bogoychev. 2019. From research to production and back: Ludicrously fast neural machine translation. In Proceedings of the 3rd Workshop on Neural Generation and Translation, pages 280-288, Hong Kong. Association for Computational Linguistics.
|
| 260 |
+
Wouter Kool, Herke van Hoof, and Max Welling. 2020. Ancestral gumbel-top-k sampling for sampling without replacement. Journal of Machine Learning Research, 21(47):1-36.
|
| 261 |
+
Taku Kudo. 2018. Subword regularization: Improving neural network translation models with multiple subword candidates. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 66-75, Melbourne, Australia. Association for Computational Linguistics.
|
| 262 |
+
Taku Kudo and John Richardson. 2018. Sentencepiece: A simple and language independent subword tokenizer and detokenizer for neural text processing. CoRR, abs/1808.06226.
|
| 263 |
+
|
| 264 |
+
Anoop Kunchukuttan, Pratik Mehta, and Pushpak Bhattacharyya. 2018. The IIT Bombay English-Hindi parallel corpus. In Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), Miyazaki, Japan. European Language Resources Association (ELRA).
|
| 265 |
+
Yaniv Leviathan, Matan Kalman, and Yossi Matias. 2022. Fast inference from transformers via speculative decoding.
|
| 266 |
+
Quentin Lhoest, Albert Villanova del Moral, Yacine Jernite, Abhishek Thakur, Patrick von Platen, Suraj Patil, Julien Chaumond, Mariama Drame, Julien Plu, Lewis Tunstall, Joe Davison, Mario Šaško, Gunjan Chhablani, Bhavitvya Malik, Simon Brandeis, Teven Le Scao, Victor Sanh, Canwen Xu, Nicolas Patry, Angelina McMillan-Major, Philipp Schmid, Sylvain Gugger, Clément Delangue, Théo Matussière, Lysandre Debut, Stas Bekman, Pierric Cistac, Thibault Goehringer, Victor Mustar, François Lagunas, Alexander Rush, and Thomas Wolf. 2021. Datasets: A community library for natural language processing. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 175-184, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
|
| 267 |
+
Xiaodong Liu, Kevin Duh, Liyuan Liu, and Jianfeng Gao. 2020. Very deep transformers for neural machine translation. arXiv preprint arXiv:2008.07772.
|
| 268 |
+
Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692.
|
| 269 |
+
J.M. Ortega and W.C. Rheinboldt. 1970. Iterative Solution of Nonlinear Equations in Several Variables. Classics in Applied Mathematics. Society for Industrial and Applied Mathematics (SIAM, 3600 Market Street, Floor 6, Philadelphia, PA 19104).
|
| 270 |
+
Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pages 311-318, Philadelphia, Pennsylvania, USA. Association for Computational Linguistics.
|
| 271 |
+
Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. 2019. Pytorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems 32, pages 8024-8035. Curran Associates, Inc.
|
| 272 |
+
|
| 273 |
+
Matt Post. 2018. A call for clarity in reporting BLEU scores. In Proceedings of the Third Conference on Machine Translation: Research Papers, pages 186-191, Belgium, Brussels. Association for Computational Linguistics.
|
| 274 |
+
Emilian Postolache, Giorgio Mariani, Michele Mancusi, Andrea Santilli, Cosmo Luca, Emanuele Rodola, et al. 2023. Latent autoregressive source separation. In Proceedings of the AAAI Conference on Artificial Intelligence.
|
| 275 |
+
Lihua Qian, Hao Zhou, Yu Bao, Mingxuan Wang, Lin Qiu, Weinan Zhang, Yong Yu, and Lei Li. 2021. Glancing transformer for non-autoregressive neural machine translation. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 1993-2003, Online. Association for Computational Linguistics.
|
| 276 |
+
Prajit Ramachandran, Tom Le Paine, Pooya Khorrami, Mohammad Babaeizadeh, Shiyu Chang, Yang Zhang, Mark A. Hasegawa-Johnson, Roy H. Campbell, and Thomas S. Huang. 2017. Fast generation for convolutional autoregressive models. CoRR, abs/1704.06001.
|
| 277 |
+
Raj Reddy. 1977. Speech understanding systems: A summary of results of the five-year research effort. Carnegie Mellon University.
|
| 278 |
+
Yousef Saad. 2003. Iterative methods for sparse linear systems. SIAM.
|
| 279 |
+
Chitwan Saharia, William Chan, Saurabh Saxena, and Mohammad Norouzi. 2020. Non-autoregressive machine translation with latent alignments. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1098-1108, Online. Association for Computational Linguistics.
|
| 280 |
+
Nikolay Savinov, Junyoung Chung, Mikolaj Binkowski, Erich Elsen, and Aaron van den Oord. 2022. Step-unrolled denoising autoencoders for text generation. In International Conference on Learning Representations.
|
| 281 |
+
Beatrice Savoldi, Marco Gaido, Luisa Bentivogli, Matteo Negri, and Marco Turchi. 2021. Gender Bias in Machine Translation. Transactions of the Association for Computational Linguistics, 9:845-874.
|
| 282 |
+
Mike Schuster and Kaisuke Nakajima. 2012. Japanese and korean voice search. In 2012 IEEE International Conference on Acoustics, Speech and Signal Processing, ICASSP 2012, Kyoto, Japan, March 25-30, 2012, pages 5149-5152. IEEE.
|
| 283 |
+
Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural machine translation of rare words with subword units. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1715-1725,
|
| 284 |
+
|
| 285 |
+
Berlin, Germany. Association for Computational Linguistics.
|
| 286 |
+
Ying Sheng, Lianmin Zheng, Binhang Yuan, Zhuohan Li, Max Ryabinin, Daniel Y Fu, Zhiqiang Xie, Beidi Chen, Clark Barrett, Joseph E Gonzalez, et al. 2023. High-throughput generative inference of large language models with a single GPU. arXiv preprint arXiv:2303.06865.
|
| 287 |
+
Jongyoon Song, Sungwon Kim, and Sungroh Yoon. 2021a. AligNART: Non-autoregressive neural machine translation by jointly learning to estimate alignment and translate. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 1-14, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
|
| 288 |
+
Yang Song, Chenlin Meng, Renjie Liao, and Stefano Ermon. 2021b. Accelerating feedforward computation via parallel nonlinear equation solving. In International Conference on Machine Learning, pages 9791-9800. PMLR.
|
| 289 |
+
Zhenqiao Song, Hao Zhou, Lihua Qian, Jingjing Xu, Shanbo Cheng, Mingxuan Wang, and Lei Li. 2022. switch-GLAT: Multilingual parallel machine translation via code-switch decoder. In International Conference on Learning Representations.
|
| 290 |
+
Mitchell Stern, Noam Shazeer, and Jakob Uszkoreit. 2018. Blockwise parallel decoding for deep autoregressive models. In Advances in Neural Information Processing Systems, volume 31. Curran Associates, Inc.
|
| 291 |
+
Emma Strubell, Ananya Ganesh, and Andrew McCallum. 2019. Energy and policy considerations for deep learning in NLP. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3645-3650, Florence, Italy. Association for Computational Linguistics.
|
| 292 |
+
Xin Sun, Tao Ge, Furu Wei, and Houfeng Wang. 2021. Instantaneous grammatical error correction with shallow aggressive decoding. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 5937-5947, Online. Association for Computational Linguistics.
|
| 293 |
+
Ilya Sutskever, Oriol Vinyals, and Quoc V Le. 2014. Sequence to sequence learning with neural networks. Advances in neural information processing systems, 27.
|
| 294 |
+
Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, and Angela Fan. 2020. Multilingual translation with extensible multilingual pretraining and finetuning. CoRR, abs/2008.00401.
|
| 295 |
+
|
| 296 |
+
Jörg Tiedemann and Santhosh Thottingal. 2020. OPUS-MT — Building open translation services for the World. In Proceedings of the 22nd Annual Conference of the European Association for Machine Translation (EAMT), Lisbon, Portugal.
|
| 297 |
+
Viet Hong Tran, Huyen Vu Thong, Nguyen Van-Vinh, and Trung Le Tien. 2015. The English-Vietnamese machine translation system for IWSLT 2015. In Proceedings of the 12th International Workshop on Spoken Language Translation: Evaluation Campaign, pages 80-83, Da Nang, Vietnam.
|
| 298 |
+
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. Advances in neural information processing systems, 30.
|
| 299 |
+
Chunqi Wang, Ji Zhang, and Haiqing Chen. 2018. Semi-autoregressive neural machine translation. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 479-488, Brussels, Belgium. Association for Computational Linguistics.
|
| 300 |
+
Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Remi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander Rush. 2020. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Association for Computational Linguistics.
|
| 301 |
+
Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, et al. 2016. Google's neural machine translation system: Bridging the gap between human and machine translation. arXiv preprint arXiv:1609.08144.
|
| 302 |
+
Heming Xia, Tao Ge, Furu Wei, and Zhifang Sui. 2022. Lossless speedup of autoregressive translation with generalized aggressive decoding.
|
| 303 |
+
Yisheng Xiao, Lijun Wu, Junliang Guo, Juntao Li, Min Zhang, Tao Qin, and Tie-yan Liu. 2022. A survey on non-autoregressive generation for neural machine translation and beyond. arXiv preprint arXiv:2204.09269.
|
| 304 |
+
Biao Zhang, Philip Williams, Ivan Titov, and Rico Sennrich. 2020. Improving massively multilingual neural machine translation and zero-shot translation. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 1628-1639, Online. Association for Computational Linguistics.
|
| 305 |
+
|
| 306 |
+
Algorithm 2 Parallel GS-Jacobi Decoding
|
| 307 |
+
Input: $\mathbf{x} = (x_1, \ldots, x_n)$ , $p_\theta$ , $b$
|
| 308 |
+
Output: $\mathbf{y} = (y_1, \ldots, y_m)$
|
| 309 |
+
1: $\mathbf{y} \gets \mathrm{INIT}(\mathbf{x})$
|
| 310 |
+
2: $m \gets \mathrm{len}(\mathbf{y})$
|
| 311 |
+
3: $i \gets 1$
|
| 312 |
+
4: while $i \leqslant m$ do
|
| 313 |
+
5: $\mathbf{o} \gets \mathrm{copy}(y_{i:i+b})$
|
| 314 |
+
6: $\mathbf{y}_{i:i+b} \gets \arg \max(p_\theta(\mathbf{y}_{i:i+b} | \mathbf{y}_{1:i+b}, \mathbf{x}))$
|
| 315 |
+
7: $stop \gets \mathrm{STOPC}(o, y_{i:i+b})$
|
| 316 |
+
8: if stop then
|
| 317 |
+
9: $i \gets i + b$
|
| 318 |
+
10: break
|
| 319 |
+
11: end if
|
| 320 |
+
12: end while
|
| 321 |
+
13: return $\mathbf{y}$
|
| 322 |
+
|
| 323 |
+
# A Algorithms details
|
| 324 |
+
|
| 325 |
+
We propose here the pseudocode of Algorithms 2 and 3 due to space limitations in the main body of the paper.
|
| 326 |
+
|
| 327 |
+
The function $copy(y_{i:i + b})$ creates a copy of the input tensor detached from the source. This is done in practice to avoid the overwriting of pointers to the same memory location. Function CHECKEOS $(y_{i:i + b})$ returns the index of the token EOS in the block if present, else -1. Function ISEOS $(y_i)$ returns True if the token $y_i$ is exactly the token EOS, else False. The function arg max selects from the model distribution over the vocabulary the index (token) with maximum probability. This procedure is done for all the tokens in parallel, in the case of parallel decoding, or for just a single token in the case of autoregressive decoding. Generally, the output is the prediction for the next token; hence it should be shifted left before the reassignment to a variable. We omitted this implementation detail for clarity.
|
| 328 |
+
|
| 329 |
+
# B Additional implementation details
|
| 330 |
+
|
| 331 |
+
We run Opus experiments in table 1 on an AMD EPYC Milan with 16 cores at 2.45 GHz and 64GB of RAM (accessible on Google Cloud - c2d-standard-16). For the scalability experiment in figure 3, we also used Google Cloud instances with an increasing number of cores (referred to as c2d-standard-XX, where XX is the number of used cores). Experiments with MBart50 on table 1, 2 and 6 are performed on a Desktop machine with Ubuntu 20.04.4 LTS, AMD
|
| 332 |
+
|
| 333 |
+
<table><tr><td>Dataset</td><td># Test</td></tr><tr><td>WMT 14 De-En (Bojar et al., 2014)</td><td>3003</td></tr><tr><td>WMT 16 Ro-En (Bojar et al., 2016)</td><td>1999</td></tr><tr><td>WMT 17 Fi-En (Bojar et al., 2017)</td><td>3002</td></tr><tr><td>IWSLT 15 En-Vi (Tran et al., 2015)</td><td>1046</td></tr><tr><td>IITB En-Hi (Kunchukuttan et al., 2018)</td><td>2507</td></tr><tr><td>FLORES-101 En-It (Goyal et al., 2022)</td><td>1012</td></tr><tr><td>FLORES-101 En-Fr (Goyal et al., 2022)</td><td>1012</td></tr></table>
|
| 334 |
+
|
| 335 |
+
Table 4: Data Statistics
|
| 336 |
+
|
| 337 |
+
Ryzen 9 3900X 12-Core Processor, 32GB of RAM, and a Palit Nvidia 3090 GPU. Additional experiments with Opus in table 6 are also performed on this machine. Models are implemented in Pytorch 1.11.0 (Paszke et al., 2019) and the Huggingface Transformer library (Wolf et al., 2020). We used python 3.8 and NVIDIA-SMI Drivers 510.73.05 with CUDA version 11.6. For OPUS we used Huggingface models available on the hub under the tag Helsinki-NLP/opus-mt-{src}-{tgt} except for the language pair RoEn where we used the model Helsinki-NLP/opus-mt-roa-en and the pair En-De where we used the checkpoint opus-2021-02-22<sup>4</sup>. For the model MBart50, we used the facebook pre-trained model available on the hub with the tag mbart-large-50-many-to-many-mmt. Since this is a multilingual model, we set the source and target language tags to correspond properly to the language pair to be translated. We report results for a single run over the test dataset since we found low variance in estimates with multiple runs, which can be calculated by simply varying the corresponding parameter in the config.yaml file. For each dataset, we used the official test split via the Huggingface dataset library (Lhoest et al., 2021). Dataset statistics are reported in table 4.
|
| 338 |
+
|
| 339 |
+
# C FLOPs calculation details
|
| 340 |
+
|
| 341 |
+
We measured computational complexity using floating point operations (FLOPs), which, as the name implies, counts the number of floating point operations performed by a model. This is a standard metric used in the literature to measure hardware-agnostic complexity. This means that hardware and software optimizations are not counted in the score (Wu et al., 2016; Kim et al., 2019). We used the
|
| 342 |
+
|
| 343 |
+
Algorithm 3 Hybrid GS-Jacobi Decoding
|
| 344 |
+
Input: $\mathbf{x} = (x_{1},\dots ,x_{n}),p_{\theta},b$
|
| 345 |
+
Output: $\mathbf{y} = (y_1,\ldots ,y_m)$
|
| 346 |
+
1: $\mathbf{y}\gets \mathrm{INIT}(\mathbf{x})$
|
| 347 |
+
2: $h\gets \mathrm{len}(\mathbf{y})$
|
| 348 |
+
3: $i\gets 1$
|
| 349 |
+
4:eos_cond<-False
|
| 350 |
+
5:while $i\leqslant h$ do
|
| 351 |
+
6: $\mathbf{o}\gets \mathrm{copy}(\mathbf{y}_{i:i + b})$
|
| 352 |
+
7: $\mathbf{y}_{i:i + b}\gets \arg \max (p_{\theta}(\mathbf{y}_{i:i + b}|\mathbf{y}_{1:i + b},\mathbf{x}))$
|
| 353 |
+
8:stop $\leftarrow$ STOPC(o,yi:i+b)
|
| 354 |
+
9:eos_ind<-CHECKEOS(yi:i+b)
|
| 355 |
+
10:if stop and eos_ind>-1 then
|
| 356 |
+
11: $\mathbf{y}\gets \mathbf{y}_1:\mathrm{eos\_ind}$
|
| 357 |
+
12:eos_cond<-True
|
| 358 |
+
13:break
|
| 359 |
+
14:end if
|
| 360 |
+
15:if stop then
|
| 361 |
+
16: $i\gets i + b$
|
| 362 |
+
17:break
|
| 363 |
+
18:end if
|
| 364 |
+
19:end while
|
| 365 |
+
20:while eos_cond!= True do
|
| 366 |
+
21: $y_{i}\gets \arg \max (p_{\theta}(y_{i}|y_{i - 1},\mathbf{x}))$
|
| 367 |
+
22: $i\gets i + 1$
|
| 368 |
+
23:eos_cond<-ISEOS(yi)
|
| 369 |
+
24:end while
|
| 370 |
+
25:return y
|
| 371 |
+
|
| 372 |
+
ELECTRA flops calculator<sup>5</sup>, inserting the number of parameters and the number of training steps performed for each model analyzed in table 3 according to the training specification in each paper. For inference FLOPs, we computed the decoding cost of each sentence in the testset of WMT14 En-De for each model. For a scale reference, we report here in Table 5 the training FLOPs of other well-known architectures. The code package contains the scripts to replicate all the experiments.
|
| 373 |
+
|
| 374 |
+
# D Additional results
|
| 375 |
+
|
| 376 |
+
We propose here additional results to the experiments in the paper that were omitted due to space constraints. Table 6 shows the same experiments of Table 1 in the main paper, proposed here on a standard desktop CPU with also the speedup in terms of iterations. It is possible to observe that in the case of MBart50 and PGJ there is a speedup
|
| 377 |
+
|
| 378 |
+
<table><tr><td>Model</td><td>Train FLOPs</td><td>Infer. FLOPs</td><td>Total FLOPs</td></tr><tr><td>Semi-NAT</td><td>1.55e17</td><td>2.08e13</td><td>1.55e17</td></tr><tr><td>Shallow Dec.</td><td>1.02e19</td><td>1.15e13</td><td>1.02e19</td></tr><tr><td>DSLP</td><td>1.93e19</td><td>1.58e13</td><td>1.93e19</td></tr><tr><td>F-VAE</td><td>4.06e19</td><td>1.58e13</td><td>4.06e19</td></tr><tr><td>DisCo</td><td>4.06e19</td><td>1.58e13</td><td>4.06e19</td></tr><tr><td>SUNDAE</td><td>5.27e21</td><td>1.58e14</td><td>5.27e21</td></tr><tr><td>BERT base</td><td>6.43e19</td><td>-</td><td>-</td></tr><tr><td>BERT large</td><td>1.92e20</td><td>-</td><td>-</td></tr><tr><td>RoBERTa</td><td>3.19e21</td><td>-</td><td>-</td></tr></table>
|
| 379 |
+
|
| 380 |
+
Table 5: FLOPs comparison with other models.
|
| 381 |
+
|
| 382 |
+
of $8 - 11\%$ in terms of iterations compared to a time speedup of $3 - 8\%$ . This means that there is room for improvement for our algorithm. Furthermore, results show that the time speedups are consistent also with standard desktop hardware. Table 7 shows the BLEU scores for the cross-lingual experiment. It is possible to observe that parallel decoding algorithms guarantee quality compared to greedy autoregressive decoding and are not so distant from beam search. We also show some qualitative results for the experiments in table 2. Finally, we propose additional visualizations using DGGviz in Figure 6.
|
| 383 |
+
|
| 384 |
+
<table><tr><td rowspan="2">Decoding Algorithm</td><td colspan="2">en→de</td><td colspan="2">de→en</td><td colspan="2">en→ro</td><td colspan="2">ro→en</td></tr><tr><td>Time</td><td>Iters</td><td>Time</td><td>Iters</td><td>Time</td><td>Iters</td><td>Time</td><td>Iters</td></tr><tr><td>Opus</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Greedy Autoregressive</td><td>1.00×</td><td>1.00×</td><td>1.00×</td><td>1.00×</td><td>1.00×</td><td>1.00×</td><td>1.00×</td><td>1.00×</td></tr><tr><td>Beam Search (beam = 5)</td><td>0.71×</td><td>1.00×</td><td>0.71×</td><td>1.00×</td><td>0.70×</td><td>1.00×</td><td>0.72×</td><td>1.00×</td></tr><tr><td>PJ Decoding</td><td>0.72×</td><td>1.03×</td><td>0.74×</td><td>1.04×</td><td>0.69×</td><td>1.04×</td><td>0.67×</td><td>1.03×</td></tr><tr><td>PGJ Decoding (b = 3)</td><td>1.16×</td><td>1.04×</td><td>1.19×</td><td>1.07×</td><td>1.17×</td><td>1.05×</td><td>1.17×</td><td>1.03×</td></tr><tr><td>HGJ Decoding (b = 3)</td><td>1.16×</td><td>1.04×</td><td>1.19×</td><td>1.06×</td><td>1.17×</td><td>1.05×</td><td>1.17×</td><td>1.03×</td></tr><tr><td>MBart50</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Greedy Autoregressive</td><td>1.00×</td><td>1.00×</td><td>1.00×</td><td>1.00×</td><td>1.00×</td><td>1.00×</td><td>1.00×</td><td>1.00×</td></tr><tr><td>Beam Search (beam = 5)</td><td>0.76×</td><td>1.00×</td><td>0.77×</td><td>1.00×</td><td>0.77×</td><td>1.00×</td><td>0.76×</td><td>1.00×</td></tr><tr><td>PJ Decoding</td><td>0.88×</td><td>1.03×</td><td>0.88×</td><td>1.03×</td><td>0.86×</td><td>1.04×</td><td>0.85×</td><td>1.03×</td></tr><tr><td>PGJ Decoding (b = 3)</td><td>1.06×</td><td>1.10×</td><td>1.08×</td><td>1.11×</td><td>1.03×</td><td>1.08×</td><td>1.04×</td><td>1.11×</td></tr><tr><td>HGJ Decoding (b = 3)</td><td>1.05×</td><td>1.07×</td><td>1.07×</td><td>1.01×</td><td>1.01×</td><td>1.02×</td><td>1.02×</td><td>1.08×</td></tr></table>
|
| 385 |
+
|
| 386 |
+
Table 6: Comparison of parallel decoding algorithms (highlighted in grey) with sequential decoding using Opus (CPU) and MBart50 (GPU) on WMT14 and WMT16. Speed is showed here both in Time and Iterations w.r.t. the greedy autoregressive baseline.
|
| 387 |
+
|
| 388 |
+
<table><tr><td rowspan="2">Dec. Algorithm</td><td colspan="2">WMT17 En-Fi</td><td colspan="2">IITB En-Hi</td><td colspan="2">IWSLT15 En-Vi</td><td colspan="2">FLORES En-It</td><td colspan="2">En-Fr</td></tr><tr><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td></tr><tr><td>Autoregressive</td><td>17.55</td><td>25.34</td><td>16.50</td><td>24.70</td><td>31.92</td><td>33.94</td><td>22.78</td><td>26.38</td><td>39.51</td><td>38.90</td></tr><tr><td>Beam Search</td><td>18.39</td><td>26.04</td><td>16.87</td><td>25.24</td><td>32.14</td><td>34.59</td><td>23.52</td><td>26.80</td><td>39.59</td><td>39.21</td></tr><tr><td>PJ</td><td>17.54</td><td>25.35</td><td>16.50</td><td>24.69</td><td>31.92</td><td>33.94</td><td>22.78</td><td>26.38</td><td>39.50</td><td>38.90</td></tr><tr><td>PGJ (b=3)</td><td>17.55</td><td>25.35</td><td>16.50</td><td>24.70</td><td>31.93</td><td>33.94</td><td>22.78</td><td>26.38</td><td>39.51</td><td>38.90</td></tr><tr><td>HGJ (b=3)</td><td>17.55</td><td>25.35</td><td>16.50</td><td>24.70</td><td>31.93</td><td>33.94</td><td>22.78</td><td>26.38</td><td>39.51</td><td>38.90</td></tr></table>
|
| 389 |
+
|
| 390 |
+
Table 7: BLEU scores on MBart50.
|
| 391 |
+
Example 1 - Wmt16 En-Ro
|
| 392 |
+
|
| 393 |
+
<table><tr><td>TARGET</td><td>Dl Corbyn va adresa primele dinctre celesaseintrebärlacare are dreptul lscurt tim dupa pranz;zrestatiasa va fi probabilit analizataïndeaproape de mass-media sisi parlamentarii laburisti.</td><td>Times (s)</td><td>BLEU</td></tr><tr><td>A</td><td>Dl Corbyn va ridica pentru a adresa prima dinctre celesaseintrebärlalocate la scurt tim dupa miezul zilei, iarperformanța sa va fi probabilit examinataïndeaproape de presa sisi de parlamentarii laburisti.</td><td>0.51</td><td>19.71</td></tr><tr><td>PJ</td><td>Dl Corbyn va ridica pentru a adresa prima dinctre celesaseintrebärlalocate la scurt tim dupa miezul zilei, iarperformanța sa va fi probabilit examinataïndeaproape de presa sisi de parlamentarii laburisti.</td><td>0.56</td><td>19.71</td></tr><tr><td>PGJ</td><td>Dl Corbyn va ridica pentru a adresa prima dinctre celesaseintrebärlalocate la scurt tim dupa miezul zilei, iarperformanța sa va fi probabilit examinataïndeaproape de presa sisi de parlamentarii laburisti.</td><td>0.45</td><td>19.71</td></tr><tr><td>HGJ</td><td>Dl Corbyn va ridica pentru a adresa prima dinctre celesaseintrebärlalocate la scurt tim dupa miezul zilei, iarperformanța sa va fi probabilit examinataïndeaproape de presa sisi de parlamentarii laburisti.</td><td>0.44</td><td>19.71</td></tr></table>
|
| 394 |
+
|
| 395 |
+
Example 2 - Flores En-It
|
| 396 |
+
|
| 397 |
+
<table><tr><td>TARGET</td><td>Quando un piccolo gruppo di esseri viventi (una piccola popolazione) si separa alla popolazione principale alla quale appartiene (per esempio se si sposta altre una catena montuosa o un fume, o si sposta su una nuova isola, rendendo quando difficile un eventuale ritorno),esso si nitroverà probabilitmente in un ambiente diverso da quello in cui si trovava prima.</td><td>Times (s)</td><td>BLEU</td></tr><tr><td>A</td><td>Quando un piccolo gruppo di esseri viventi si separa alla popolazione principale da cui provengono, come se si muovano su una catena di montagne o su un fume o se si trasferiscono su una nuova isola per non poter tornare lavorante, si troveranno spesso in un ambiente diverso da quello in cui erano prima.</td><td>0.61</td><td>31.69</td></tr><tr><td>PJ</td><td>Quando un piccolo gruppo di esseri viventi si separa alla popolazione principale da cui provengono, come se si muovano su una catena di montagne o su un fume o se si trasferiscono su una nuova isola per non poter tornare lavorante, si troveranno spesso in un ambiente diverso da quello in cui erano prima.</td><td>0.73</td><td>31.69</td></tr><tr><td>PGJ</td><td>Quando un piccolo gruppo di esseri viventi si separa alla popolazione principale da cui provengono, come se si muovano su una catena di montagne o su un fume o se si trasferiscono su una nuova isola per non poter tornare lavorante, si troveranno spesso in un ambiente diverso da quello in cui erano prima.</td><td>0.58</td><td>31.69</td></tr><tr><td>HGJ</td><td>Quando un piccolo gruppo di esseri viventi si separa alla popolazione principale da cui provengono, come se si muovano su una catena di montagne o su un fume o se si trasferiscono su una nuova isola per non poter tornare lavorante, si troveranno spesso in un ambiente diverso da quello in cui erano prima.</td><td>0.59</td><td>31.69</td></tr></table>
|
| 398 |
+
|
| 399 |
+
Example 3 - Wmt14 En-De
|
| 400 |
+
|
| 401 |
+
<table><tr><td>TARGET</td><td>Bei der diesjährigen Veranstaltung gibt es Auftritte von Wanda Sykes, Kathy Griffin und Bill Maher sowie auch von „Stand Up for Heroes“, einer jährlichen Musik- und Comedy-Benefizveranstaltung für Armeeveteranen im Madison Square Garden, bei der unter anderen Bruce Springsteen, Jon Stewart, Roger Waters und Bill Cosby auftreten.</td><td>Times (s)</td><td>BLEU</td></tr><tr><td>A</td><td>Zu den diesjährigen Veranstaltungen gehören Auftritte von Wanda Sykes, Kathy Griffin und Bill Maher sowie "Stand Up for Heroes", ein jährlicher Musik- und Komödie-Vorteil für Militärveteranen, im Madison Square Garden, mit u.a. Bruce Springsteen, Jon Stewart, Roger Waters und Bill Cosby.</td><td>1.30</td><td>47.04</td></tr><tr><td>PJ</td><td>Zu den diesjährigen Veranstaltungen gehören Auftritte von Wanda Sykes, Kathy Griffin und Bill Maher sowie "Stand Up for Heroes", ein jährlicher Musik- und Komödie-Vorteil für Militärveteranen, im Madison Square Garden, mit u.a. Bruce Springsteen, Jon Stewart, Roger Waters und Bill Cosby.</td><td>2.43</td><td>47.04</td></tr><tr><td>PGJ</td><td>Zu den diesjährigen Veranstaltungen gehören Auftritte von Wanda Sykes, Kathy Griffin und Bill Maher sowie "Stand Up for Heroes", ein jährlicher Musik- und Komödie-Vorteil für Militärveteranen, im Madison Square Garden, mit u.a. Bruce Springsteen, Jon Stewart, Roger Waters und Bill Cosby.</td><td>1.09</td><td>47.04</td></tr><tr><td>HGJ</td><td>Zu den diesjährigen Veranstaltungen gehören Auftritte von Wanda Sykes, Kathy Griffin und Bill Maher sowie "Stand Up for Heroes", ein jährlicher Musik- und Komödie-Vorteil für Militärveteranen, im Madison Square Garden, mit u.a. Bruce Springsteen, Jon Stewart, Roger Waters und Bill Cosby.</td><td>1.08</td><td>47.04</td></tr></table>
|
| 402 |
+
|
| 403 |
+
Example 4 - Flores En-Fr
|
| 404 |
+
|
| 405 |
+
<table><tr><td>TARGET</td><td>Cinq minutes après le début de l'exposition, un vent se met à souffler pour atteindre, environ une minute plus tard, la vitesse de 70km/h... puis la pluie arrive, mais si force et si grosse qu'elle frappe votre peau comme une aiguille, puis la grête tombe du ciel, lesgens paniquent, crient et se roulent dessus.</td><td>Times (s)</td><td>BLEU</td></tr><tr><td>A</td><td>Cinq minutes après l'exposition, le vent commence à tourner, environ un minute plus tard, le vent atteint 70 km/h, puis la pluie arrive, mais si force et si grande qu'elle vous frappe la peau comme une aiguille, puis le hail tombe du ciel, lesgens paniquent, s'experiment et se courent l'un sur l'autre.</td><td>0.82</td><td>39.90</td></tr><tr><td>PJ</td><td>Cinq minutes après l'exposition, le vent commence à tourner, environ un minute plus tard, le vent atteint 70 km/h, puis la pluie arrive, mais si force et si grande qu'elle vous frappe la peau comme une aiguille, puis le hail tombe du ciel, lesgens paniquent, s'experiment et se courent l'un sur l'autre.</td><td>0.94</td><td>39.90</td></tr><tr><td>PGJ</td><td>Cinq minutes après l'exposition, le vent commence à tourner, environ un minute plus tard, le vent atteint 70 km/h, puis la pluie arrive, mais si force et si grande qu'elle vous frappe la peau comme une aiguille, puis le hail tombe du ciel, lesgens paniquent, s'experiment et se courent l'un sur l'autre.</td><td>0.73</td><td>39.90</td></tr><tr><td>HGJ</td><td>Cinq minutes après l'exposition, le vent commence à tourner, environ un minute plus tard, le vent atteint 70 km/h, puis la pluie arrive, mais si force et si grande qu'elle vous frappe la peau comme une aiguille, puis le hail tombe du ciel, lesgens paniquent, s'experiment et se courent l'un sur l'autre.</td><td>0.72</td><td>39.90</td></tr></table>
|
| 406 |
+
|
| 407 |
+
Example 5 - IWSLT15 En-Vi
|
| 408 |
+
|
| 409 |
+
<table><tr><td>TARGET</td><td>Tói yesso suc manh cuia tién hoá, và tói阿拉伯 rato dièu rát co bän doí而导致 miu su' sóng trong nhùng sinh vátdon bao, mõi té bao chi dón gián lâ phàn chia, và miu thóc tin di truyèn trong tí bó do duç c triyên sang hai tê bó con.</td><td>Times (s)</td><td>BLEU</td></tr><tr><td>A</td><td>Tói dã yesso trich suc manh cuia sú tién hoá vã tói阿拉伯 rato dièu rát can bäntron háu hét su tôn tai cua su' sóng trong cac sinh vátdon bao mõi té bao dón gián lâ chia ra vã tát ca näng luǒng di truyèn cua tê bao dó duç c vânданhong trong cai hét bó con.</td><td>0.61</td><td>31.45</td></tr><tr><td>PJ</td><td>Tói dã yesso trich suc manh cuia sú tién hoá vã tói阿拉伯 rato dièu rát can bäntron háu hét su tôn tai cua su' sóng trong cac sinh vátdon bao mõi té bao dón gián lâ chia ra vã tát ca näng luǒng di truyèn cua tê bao do duç c vânданhong trong cai hét bó con.</td><td>0.71</td><td>31.45</td></tr><tr><td>PGJ</td><td>Tói dã yesso trich suc manh cuia sú tién hoá vã tói阿拉伯 rato dièu rát can bäntron háu hét su tôn tai cua su' sóng trong cac sinh vátdon bao mõi té bao dón gián lâ chia ra vã tát ca näng luǒng di truyèn cua tê bao duó duç c vânданhong trong cai hét bó con.</td><td>0.54</td><td>31.45</td></tr><tr><td>HGJ</td><td>Tói dã yesso trich suc manh cuia sú tién hoá vã tói阿拉伯 rato dièu rát can bäntron háu hét su tôn tai cua su' sóng trong cac sinh vátdon bao mõi té bao dón gián lâ chia ra vã tát ca näng luǒng di truyèn cua tê bão do duç c vânданhong trong cai hét bó con.</td><td>0.53</td><td>31.45</td></tr></table>
|
| 410 |
+
|
| 411 |
+
Table 7: Translation examples generated with the autoregressive (A) and the different decoding algorithms proposed (PJ, PGJ, HGJ) on Opus (WMT datasets) and MBart50. The decoding time is shown in seconds.
|
| 412 |
+
|
| 413 |
+

|
| 414 |
+
(a) En-De: "Lack of Scots title race bores Dutch - de Boer" $\rightarrow$ "Fehlende Schottentitelrennen bohrt Niederlandisch - de Boer"
|
| 415 |
+
|
| 416 |
+

|
| 417 |
+
(b) De-En: "Private Fachgeschäfte und auch den Großhandel gibt es fast nicht mehr." $\rightarrow$ "Private specialist shops and wholesale trade are almost no longer available."
|
| 418 |
+
|
| 419 |
+

|
| 420 |
+
(c) Ro-En: "Un prim contract de lucrări a fost reziliat în avril 2012, după ce se efectuasață lucrări de 4,5 milioane lei." → "A first contract of employment was terminated in April 2012, after a work of 4.5 million lei."
|
| 421 |
+
|
| 422 |
+

|
| 423 |
+
(d) En-Ro: "'Shot in Joburg': Homeless youth trained as photographers" $\rightarrow$ "'Fotografia in Joburg': Tineri fãrã adapost formatã ca fotografi"
|
| 424 |
+
|
| 425 |
+

|
| 426 |
+
(e) De-En: "Einige sind nach der Installation auf Probleme gestoßen, da sie eine Fehlermeldung erhalten, die mitteilt, dass die "Software-Aktualisierung fehlgeschlagen" ist." $\rightarrow$ "Some have encountered problems after installation, as they receive an error message that tells us that "software update has failed"."
|
| 427 |
+
|
| 428 |
+

|
| 429 |
+
(f) Ro-En: "Se pare că va fi acuzat de fugă de la locul accidentului, neoferirea primului ajutorși alte infractiuni rutiere." $\rightarrow$ "Apparently he'll be charged with running from the scene of the accident, the first aid and other road crimes."
|
| 430 |
+
Figure 6: DDGviz additional visualizations
|
| 431 |
+
|
| 432 |
+
# A For every submission:
|
| 433 |
+
|
| 434 |
+
A1. Did you describe the limitations of your work?
|
| 435 |
+
|
| 436 |
+
Limitations section
|
| 437 |
+
|
| 438 |
+
A2. Did you discuss any potential risks of your work?
|
| 439 |
+
|
| 440 |
+
Ethics Statements
|
| 441 |
+
|
| 442 |
+
A3. Do the abstract and introduction summarize the paper's main claims?
|
| 443 |
+
|
| 444 |
+
Abstract
|
| 445 |
+
|
| 446 |
+
A4. Have you used AI writing assistants when working on this paper?
|
| 447 |
+
|
| 448 |
+
We used ChatGPT to rephrase some sentences in the final camera-ready version in sections 4.3 and 5.
|
| 449 |
+
|
| 450 |
+
# B Did you use or create scientific artifacts?
|
| 451 |
+
|
| 452 |
+
Code to reproduce the experiments (zip)
|
| 453 |
+
|
| 454 |
+
B1. Did you cite the creators of artifacts you used?
|
| 455 |
+
|
| 456 |
+
Section 4 and Appendix B
|
| 457 |
+
|
| 458 |
+
B2. Did you discuss the license or terms for use and / or distribution of any artifacts?
|
| 459 |
+
|
| 460 |
+
License file in the code repository
|
| 461 |
+
|
| 462 |
+
B3. Did you discuss if your use of existing artifact(s) was consistent with their intended use, provided that it was specified? For the artifacts you create, do you specify intended use and whether that is compatible with the original access conditions (in particular, derivatives of data accessed for research purposes should not be used outside of research contexts)?
|
| 463 |
+
|
| 464 |
+
License file in the code repository
|
| 465 |
+
|
| 466 |
+
B4. Did you discuss the steps taken to check whether the data that was collected / used contains any information that names or uniquely identifies individual people or offensive content, and the steps taken to protect / anonymize it?
|
| 467 |
+
|
| 468 |
+
Not applicable. No data was collected
|
| 469 |
+
|
| 470 |
+
B5. Did you provide documentation of the artifacts, e.g., coverage of domains, languages, and linguistic phenomena, demographic groups represented, etc.?
|
| 471 |
+
|
| 472 |
+
Not applicable. Left blank.
|
| 473 |
+
|
| 474 |
+
B6. Did you report relevant statistics like the number of examples, details of train / test / dev splits, etc. for the data that you used / created? Even for commonly-used benchmark datasets, include the number of examples in train / validation / test splits, as these provide necessary context for a reader to understand experimental results. For example, small differences in accuracy on large test sets may be significant, while on small test sets they may not be.
|
| 475 |
+
|
| 476 |
+
Data is automatically downloaded with standard train/test/dev splits via the Huggingface datasets library. Additional statistics in Appendix B
|
| 477 |
+
|
| 478 |
+
# C Did you run computational experiments?
|
| 479 |
+
|
| 480 |
+
Section 4 - Experiments
|
| 481 |
+
|
| 482 |
+
C1. Did you report the number of parameters in the models used, the total computational budget (e.g., GPU hours), and computing infrastructure used?
|
| 483 |
+
|
| 484 |
+
Section 4.1
|
| 485 |
+
|
| 486 |
+
The Responsible NLP Checklist used at ACL 2023 is adopted from NAACL 2022, with the addition of a question on AI writing assistance.
|
| 487 |
+
|
| 488 |
+
C2. Did you discuss the experimental setup, including hyperparameter search and best-found hyperparameter values? Section 4.1
|
| 489 |
+
C3. Did you report descriptive statistics about your results (e.g., error bars around results, summary statistics from sets of experiments), and is it transparent whether you are reporting the max, mean, etc. or just a single run? Section 4
|
| 490 |
+
C4. If you used existing packages (e.g., for preprocessing, for normalization, or for evaluation), did you report the implementation, model, and parameter settings used (e.g., NLTK, Spacy, ROUGE, etc.)? Section 4.1 and Appendix B
|
| 491 |
+
|
| 492 |
+
D Did you use human annotators (e.g., crowdworkers) or research with human participants? Left blank.
|
| 493 |
+
|
| 494 |
+
D1. Did you report the full text of instructions given to participants, including e.g., screenshots, disclaimers of any risks to participants or annotators, etc.? Not applicable. Left blank.
|
| 495 |
+
D2. Did you report information about how you recruited (e.g., crowdsourcing platform, students) and paid participants, and discuss if such payment is adequate given the participants' demographic (e.g., country of residence)? Not applicable. Left blank.
|
| 496 |
+
D3. Did you discuss whether and how consent was obtained from people whose data you're using/curating? For example, if you collected data via crowdsourcing, did your instructions to crowdworkers explain how the data would be used? Not applicable. Left blank.
|
| 497 |
+
D4. Was the data collection protocol approved (or determined exempt) by an ethics review board? Not applicable. Left blank.
|
| 498 |
+
D5. Did you report the basic demographic and geographic characteristics of the annotator population that is the source of the data? Not applicable. Left blank.
|
acceleratingtransformerinferencefortranslationviaparalleldecoding/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f88e2d5e4d3ea0a3df1c661f5cfb92fbe7272731fd62df0ca566669641662eff
|
| 3 |
+
size 1153611
|
acceleratingtransformerinferencefortranslationviaparalleldecoding/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:78b7e050fb303f78073f16d9841e03f63560de77d7033c7910a4b5d06500dd98
|
| 3 |
+
size 579217
|
accentanautomaticeventcommonsenseevaluationmetricforopendomaindialoguesystems/92f3e19f-06e9-454d-a2b2-83f83888e1e7_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:03709865b8cedfabb822ffe5e653bbc6c4862c4ded95b77c678c9077e2475488
|
| 3 |
+
size 161336
|
accentanautomaticeventcommonsenseevaluationmetricforopendomaindialoguesystems/92f3e19f-06e9-454d-a2b2-83f83888e1e7_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c331a89c7ea87b3d96c32185e64792756f3e5159e3112d299a33638313d7eb82
|
| 3 |
+
size 196663
|
accentanautomaticeventcommonsenseevaluationmetricforopendomaindialoguesystems/92f3e19f-06e9-454d-a2b2-83f83888e1e7_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ccaff4443201c7c7f512a04275f427663b080afb7bffe7bb129357c4e5e8abc2
|
| 3 |
+
size 1583237
|
accentanautomaticeventcommonsenseevaluationmetricforopendomaindialoguesystems/full.md
ADDED
|
@@ -0,0 +1,763 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ACCENT: An Automatic Event Commonsense Evaluation Metric for Open-Domain Dialogue Systems
|
| 2 |
+
|
| 3 |
+
Sarik Ghazarian $^{1*}$ Yijia Shao $^{2*†}$ Rujun Han $^{3‡}$ Aram Galstyan $^{1}$ Nanyun Peng $^{4}$
|
| 4 |
+
|
| 5 |
+
<sup>1</sup>University of Southern California / Information Sciences Institute
|
| 6 |
+
|
| 7 |
+
2Peking University
|
| 8 |
+
|
| 9 |
+
$^{3}$ AWS AI Labs
|
| 10 |
+
|
| 11 |
+
<sup>4</sup>Computer Science Department of University of California, Los Angeles
|
| 12 |
+
|
| 13 |
+
{sarik, galstyan} @isi.edu, shaoyj@pku.edu.cn, rujunh@amazon.com, violetpeng@cs.ucla.edu
|
| 14 |
+
|
| 15 |
+
# Abstract
|
| 16 |
+
|
| 17 |
+
Commonsense reasoning is omnipresent in human communications and thus is an important feature for open-domain dialogue systems. However, evaluating commonsense in dialogue systems is still an open challenge. We take the first step by focusing on event commonsense that considers events and their relations, and is crucial in both dialogues and general commonsense reasoning. We propose ACCENT, an event commonsense evaluation metric empowered by commonsense knowledge bases (CSKBs). ACCENT first extracts event-relation tuples from a dialogue, and then evaluates the response by scoring the tuples in terms of their compatibility with the CSKB. To evaluate ACCENT, we construct the first public event commonsense evaluation dataset for open-domain dialogues. Our experiments show that ACCENT is an efficient metric for event commonsense evaluation, which achieves higher correlations with human judgments than existing baselines.
|
| 18 |
+
|
| 19 |
+
# 1 Introduction
|
| 20 |
+
|
| 21 |
+
Open-domain dialogue systems aim to have natural and engaging conversations with users (Chen et al., 2017). The abundance of dialogue corpus (Dziri et al., 2018) and the development of neural models (Radford et al., 2019; Lewis et al., 2020) enable open-domain dialogue systems to generate grammatically correct and meaningful responses (Zhang et al., 2020d; Bao et al., 2021; Ghazarian et al., 2021). Despite the success, systems still struggle to consistently produce commonsense-compliant responses as humans do. As shown in Figure 1 Example A, the generated response is not compliant with commonsense since "need
|
| 22 |
+
|
| 23 |
+

|
| 24 |
+
Figure 1: Examples of nonsensical system responses in open-domain dialogues.
|
| 25 |
+
|
| 26 |
+
an oxygen mask" is not a reasonable prerequisite for "like to paint". Commonsense issues for dialogue systems can also be manifested when we consider the dialogue history. For instance, in Figure 1 Example B, the system's response "That is interesting!" after the user talks about their car accident violates commonly accepted social norms (Frischmann, 2021).
|
| 27 |
+
|
| 28 |
+
In this work, we study automatic dialogue commonsense evaluation by focusing on event commonsense (Sap et al., 2020; Rashkin et al., 2018), which concerns commonsense knowledge about events and their relations. Our focus on event commonsense is motivated by the following three observations: First, advanced open-domain dialogue systems have been pre-trained on large corpus and thus suffer less from factoid commonsense issues (Petroni et al., 2019). Second, events and their relations are key components of commonsense reasoning (McCarthy and Hayes, 1981), and our
|
| 29 |
+
|
| 30 |
+
study shows overall commonsense and event commonsense are highly correlated (see §4). Third, event commonsense aligns well with the interactive nature of open-domain dialogue systems (Huang et al., 2020) to complete certain social goals.
|
| 31 |
+
|
| 32 |
+
To automatically evaluate event commonsense in open-domain dialogues, we propose ACCENT, a reference-free Automatic Event Commonsense Evaluation meTric which leverages commonsense knowledge bases (CSKBs) and measures the quality of generated responses without having ground-truth reference responses. For example, comparing the examples in Figure 1 against the CSKB easily reveals commonsense errors in the responses because when "PersonX likes to paint", what he/she needs may be "to get a paint brush" instead of "to get an oxygen mask", and when "PersonX loses arms from a car accident", the other person is expected to feel "sad".
|
| 33 |
+
|
| 34 |
+
While these judgments are intuitive to humans, two challenges exist in automating the evaluation process. First, there is a considerable gap between free-form conversational data and the compact commonsense knowledge in the CSKB. Second, locating relevant knowledge in the CSKB is non-trivial.
|
| 35 |
+
|
| 36 |
+
ACCENT addresses these challenges through a pipeline method that uses an intermediate symbolic representation for commonsense reasoning. ACCENT first extracts event-relation tuples from the target response and its preceding dialogue history via a prompt-based generative model trained in a low-resource setting. Those extracted tuples bridge the gap between the free-form dialogue and the compact form of CSKB. Then, a compatibility score is computed to decide how well each extracted tuple aligns with the CSKB.
|
| 37 |
+
|
| 38 |
+
To train and evaluate ACCENT, we construct the first publicly available event commonsense evaluation dataset for open-domain dialogues (see §4). Besides collecting human commonsense judgments, we request annotators to manually extract event-relation tuples for further analysis.
|
| 39 |
+
|
| 40 |
+
Our main contributions are three-fold:
|
| 41 |
+
|
| 42 |
+
- We propose ACCENT, an event commonsense evaluation metric for open-domain dialogue systems. To the best of our knowledge, this is the first work that systematically studies event commonsense in dialogue systems.
|
| 43 |
+
- We construct the first publicly available event commonsense evaluation dataset for open
|
| 44 |
+
|
| 45 |
+
domain dialogues.$^{1}$
|
| 46 |
+
|
| 47 |
+
- Extensive experiments show that ACCENT achieves better correlation with human judgments for dialogue commonsense evaluation than several well-designed baselines, and enables easier interpretability of results.
|
| 48 |
+
|
| 49 |
+
# 2 Background: Event Commonsense
|
| 50 |
+
|
| 51 |
+
Endowing machines with human-like commonsense reasoning capabilities has been an ultimate goal of artificial intelligence research for decades (McCarthy and Hayes, 1981; LeCun, 2022). While many early works focused on factoid commonsense or the knowledge about concepts (Lenat, 1995; Liu and Singh, 2004), event commonsense emerges as an important aspect for machine commonsense measurement (Chen et al., 2021). Compared with concepts or entities, events are more informative, involving actions, participants, time etc. Besides, event commonsense also requires understanding various relations between events (Kuipers, 1984; Rashkin et al., 2018) which would facilitate complex reasoning, especially in interactive scenarios such as dialogues.
|
| 52 |
+
|
| 53 |
+
Among the current commonsense resources (related works in Appendix A), $\mathrm{ATOMIC}_{20}^{20}$ (Hwang et al., 2021) is a comprehensive CSKB including physical-entity, event-centered, and social-interaction knowledge. Its event-centered and social-interaction components take up $84.4\%$ of the tuples of the entire knowledge base, providing knowledge regarding how events/human actions are associated with other events/actions. For example, given the event "X runs out of steam", according to $\mathrm{ATOMIC}_{20}^{20}$ , this event may happen after "X exercises in the gym", and the person X is likely to "feel tired".
|
| 54 |
+
|
| 55 |
+
# 3 Method
|
| 56 |
+
|
| 57 |
+
We present ACCENT, as a framework for event commonsense evaluation. Figure 2 gives an overview of ACCENT with two major components.
|
| 58 |
+
|
| 59 |
+
# 3.1 Symbolic Intermediate Representation
|
| 60 |
+
|
| 61 |
+
ACCENT uses event-relation tuples as the symbolic intermediate representation. Each tuple contains a head event and a tail event which are connected through an event relation. We formally define events and relations below.
|
| 62 |
+
|
| 63 |
+

|
| 64 |
+
Figure 2: The overview of ACCENT. Given the target response and its dialogue history, ACCENT first extracts the event-relation tuples. Then, the compatibility test (detailed illustration in Figure 4) assigns a score to each tuple: ACCENT queries the dynamic CSKB, i.e., COMET, with $h$ and $r$ , and generates $k$ events. The compatible score is the maximum similarity between the ground-truth $t$ and the $k$ generated events $\{t_{gen}^i\}_{i=1}^k$ . Scores for all tuples in a response are averaged to obtain the event commonsense score for the target response.
|
| 65 |
+
|
| 66 |
+

|
| 67 |
+
Figure 3: Illustration for event-relation extraction. For each relation $r \in \tilde{\mathcal{R}}$ , we use its corresponding prompt to guide the model to generate $h$ and $t$ . The final tuple is parsed from the generated output.
|
| 68 |
+
|
| 69 |
+
Event Following Pustejovsky et al. (2003), we define events as short phrases with a trigger word and its arguments (e.g., I like to paint). To better align with $\mathrm{ATOMIC}_{20}^{20}$ , we normalize the event by replacing tokens referring to people with Person variable (e.g., PersonX likes to paint).
|
| 70 |
+
|
| 71 |
+
Relation We select $\tilde{\mathcal{R}} = \{\mathrm{xIntent, xWant, oWant, xReact, oReact, xNeed, xAttr, xEffect,}$
|
| 72 |
+
|
| 73 |
+
oEffect, HinderedBy, IsAfter, HasSubEvent\}2 from $\mathrm{ATOMIC}_{20}^{20}$ relations. These relations cover human behaviors, i.e., motivation, want, reaction, need, description, towards events (Sap et al., 2019b), the cause-effect and constraint in force dynamic (Talmy, 1988), the temporal information, as well as the parent-child relation in event hierarchy. Examples for each relation are in Appendix C.
|
| 74 |
+
|
| 75 |
+
# 3.2 Event-Relation Extraction
|
| 76 |
+
|
| 77 |
+
The input of the event commonsense evaluation task is a list of utterances $\{u_0, u_1, \dots, u_{n-1}\}$ representing the dialogue history and the target response $u_n$ . ACCENT first converts the free-form text into event-relation tuples. To retain the information in $u_n$ , ACCENT extracts tuples whose head and tail events are both from the target response (denoted as "Single"). Besides, to capture event commonsense issues conditioned on the dialogue history (e.g., Figure 1 Example B), ACCENT also extracts tuples whose two events come from $u_n$ and $u_{n-1}$ respectively (denoted as "Pair").
|
| 78 |
+
|
| 79 |
+
As illustrated in Figure 3, the event-relation extractor in ACCENT is a T5 model $\mathcal{M}$ (Raffel et al., 2020) guided to generate the head and tail events via designed prompts for each relation. ACCENT concatenates the prompt for $r\in \tilde{\mathcal{R}}$ and the dialogue as the input and fine-tunes $\mathcal{M}$ in a low resource setting. When the relation $r$ exists in the input utterances, the fine-tuned $\mathcal{M}$ is expected to generate the head and tail events following a particular format, i.e., "event1: {head}; event2: {tail}", so that the tuple can be parsed from the decoded sequence (from Block A to Block B in Figure 3).
|
| 80 |
+
|
| 81 |
+

|
| 82 |
+
Figure 4: An example of compatibility test. We query the dynamic CSKB with $h$ and $r$ , and the compatibility score is the maximum similarity between $t$ and the generated tail events $(\{t_{gen}^i\}_{i=1}^k)$ .
|
| 83 |
+
|
| 84 |
+
Otherwise, the fine-tuned $\mathcal{M}$ is expected to output "None". For each $r\in \tilde{\mathcal{R}}$ , the designed prompt explains the semantic meaning of $r$ and triggers the model to generate the head and tail events (the prompts are included in Appendix C). At the inference time, we query $\mathcal{M}$ with prompts for each $r$ and parse the generated outputs to get $h$ and $t$ to construct tuples.
|
| 85 |
+
|
| 86 |
+
# 3.3 Compatibility Test
|
| 87 |
+
|
| 88 |
+
After extracting event-relation tuples, ACCENT checks whether these tuples are sensible through a compatibility test. Denoting the CSKB as $\mathcal{C}$ , the compatibility test aims to learn a scoring function $f$ based on $\mathcal{C}$ , where $f((h,r,t)|\mathcal{C})$ represents the compatibility of the target tuple $(h,r,t)$ with the CSKB $\mathcal{C}$ . We propose to score $(h,r,t)$ by querying a dynamic version of $\mathcal{C}$ with $h$ and $r$ . Figure 4 gives an example of the whole process.
|
| 89 |
+
|
| 90 |
+
Specifically, ACCENT uses COMET (Bosselut et al., 2019) as the dynamic CSKB. COMET adapts the pre-trained language model by fine-tuning it on $\mathcal{C}$ through a conditional generation task where "{head} {relation} [GEN]" is the source and a tail event is the target. To score $(h,r,t)$, we query the model by requiring it to generate $t_{gen}$ given "{h} {r} [GEN]". The beam search method is applied for decoding, so we obtain a set of generated tail events, $\{t_{gen}^i\}_{i = 1}^k$, where $k$ is the beam size.
|
| 91 |
+
|
| 92 |
+
The compatibility score for $(h,r,t)$ is then computed by checking the similarity between $t$ and the most similar $t_{gen}$ among $\{t_{gen}^i\}_{i = 1}^k$ :
|
| 93 |
+
|
| 94 |
+
$$
|
| 95 |
+
f((h, r, t) \mid \mathcal{C}) = \max_{1 \leq i \leq k} \cos\left(\operatorname{embed}(t), \operatorname{embed}\left(t_{gen}^{i}\right)\right) \quad (1)
|
| 96 |
+
$$
|
| 97 |
+
|
| 98 |
+
Here, $\operatorname{embed}(\cdot)$ is parameterized by a SentenceBERT model (Reimers and Gurevych, 2019).
|
| 99 |
+
|
| 100 |
+
After getting the compatibility scores for each extracted tuple, we average them to get the final score for the target response (see Figure 2).
|
| 101 |
+
|
| 102 |
+
# 4 Data Collection
|
| 103 |
+
|
| 104 |
+
We construct the first event commonsense evaluation dataset for open-domain dialogues through crowdsourcing on Amazon Mechanical Turk (MTurk). In this section, we describe the collection procedure and the details of the dataset.
|
| 105 |
+
|
| 106 |
+
# 4.1 Dialogue Data Preparation
|
| 107 |
+
|
| 108 |
+
We select dialogue histories from DailyDialog (Li et al., 2017), PersonaChat (Zhang et al., 2018), and TopicalChat (Gopalakrishnan et al., 2019) human-human dialogues. The dialogue history is limited to at most 4 consecutive utterances. Since human utterances barely contradict event commonsense, to better evaluate machine-generated dialogues, we collect responses using advanced dialogue systems, DialoGPT (Zhang et al., 2020d), PLATO-2 (Bao et al., 2021), DiSCoL (Ghazarian et al., 2021).
|
| 109 |
+
|
| 110 |
+
To ensure most samples contain events and are meaningful for event commonsense evaluation, we filter samples using the following criteria: (1) the response contains at least 5 words; (2) the response contains at least 1 non-interrogative sentence<sup>3</sup>; (3) the response is more than a courtesy (e.g., "It's been nice chatting with you.")<sup>4</sup>. After filtering, we randomly select 300 samples and split them into 200 for training and 100 for testing. We name this dataset DECO (Dialogue Event Commonsense Dataset).
|
| 111 |
+
|
| 112 |
+
# 4.2 Tuple Extraction
|
| 113 |
+
|
| 114 |
+
To train the event-relation extractor of ACCENT, we collect human extracted event-relation tuples from DECO training set. Annotators are shown with the target response, the dialogue history, a specific relation, and are requested to compose event-relation tuples. They are allowed to tick "I cannot find any tuple" if no tuple can be found. We also request them to select whether the tuple belongs to "Single" or "Pair" (defined in §3.2) for each tuple they extract. Figure 8 in Appendix D shows our data collection panel. We launched HITs $^5$ for relations in $\tilde{\mathcal{R}}$ repeatedly until we obtained at least 20 tuples for each relation. In order to ensure the test set is comprehensive, we particularly request annotators to compose tuples for all 12 relations in $\tilde{\mathcal{R}}$ (100 samples × 12 relations in total).
|
| 115 |
+
|
| 116 |
+
$^{3}$We check this by finding sentences that do not end with a question mark ("?").
|
| 117 |
+
$^{4}$These responses are manually filtered out.
|
| 118 |
+
$^{5}$ HIT is an assignment unit on Amazon MTurk.
|
| 119 |
+
|
| 120 |
+

|
| 121 |
+
Figure 5: Relation distribution in DECO test set.
|
| 122 |
+
|
| 123 |
+
A separate validation round was conducted to check whether each extracted tuple satisfies (1) the head and tail are events, (2) the head and tail come from $u_{n}$ or $u_{n-1}$ , (3) the relation between the head and tail can be inferred from the dialogue. A tuple is deemed valid if the majority of 3 annotators vote "yes". After removing invalid tuples (the dialogue numbers remain unchanged), we collected 307 tuples for training and 467 tuples from the DECO test set. Figure 5 shows the relation distribution in the densely annotated test set. More details about DECO statistics are included in Appendix D.
|
| 124 |
+
|
| 125 |
+
# 4.3 Commonsense Scoring
|
| 126 |
+
|
| 127 |
+
We instruct annotators to score target responses in terms of event commonsense by focusing on the events and their relations (the guideline is shown in Figure 7). Each response was annotated by 3 individual annotators on a scale of 1 to 5. Following Mehri and Eskenazi (2020), we measure the inter-annotator agreement (IAA) by correlating each annotation with the mean of the other annotations for the same sample, and the Spearman correlation is 0.578, showing an acceptable agreement. The final event commonsense score assigned to each sample is the average of 3 individual ratings.
|
| 128 |
+
|
| 129 |
+
We also requested the annotators to judge the overall commonsense of a dialogue response before introducing the concept of event commonsense to them. Among the 900 annotation pairs we collected, the Spearman correlation between the two scores reaches 0.862, which indicates that event commonsense is a key component in overall commonsense reasoning.
|
| 130 |
+
|
| 131 |
+
# 4.4 Additional Human-Machine Dialogues
|
| 132 |
+
|
| 133 |
+
We further explore the generalization ability of ACCENT on responses with human-machine dialogue histories. We select 100 samples from ConTurE (Ghazarian et al., 2022a), a turn-level evaluation dataset, to annotate event commonsense scores. We denote this dataset as ConTurE Subset. Its statistics are also included in Appendix D.
|
| 134 |
+
|
| 135 |
+
# 5 Experiments
|
| 136 |
+
|
| 137 |
+
# 5.1 Setups
|
| 138 |
+
|
| 139 |
+
We compare ACCENT with baseline methods for event commonsense evaluation and also examine its two components separately. Therefore, our experiments include three setups for the evaluation:
|
| 140 |
+
|
| 141 |
+
Setup 1 (Metrics Performance) Our main goal is to evaluate the commonsense metric, and we achieve this by computing the correlation between automatic scores and human judgments. ACCENT and baseline metrics are tested on DECO test set and ConTurE Subset.
|
| 142 |
+
|
| 143 |
+
Setup 2 (Event-Relation Extraction) We evaluate the performance of the event-relation extraction component of ACCENT by comparing the automatically extracted tuples with human extracted tuples on DECO test set. We view checking whether a tuple with relation $r$ is extracted from the utterances $u_{n}$ and $u_{n - 1}$ as a binary classification problem and compute the F1 score. We also measure how "close" the automatically extracted head and tail events are to human extraction results. We convert the tuple into a sentence by concatenating the head and tail, and then compute BLEU-2 (Papineni et al., 2002) and BERTScore (Zhang et al., 2020c).
|
| 144 |
+
|
| 145 |
+
Setup 3 (Compatibility Test): The compatibility test component of ACCENT can be viewed as a tuple scoring task. We compare our proposed approach with other tuple scoring methods on a large-scale benchmark (Fang et al., 2021a) which contains event-relation tuples with 0 (compatible to a given CSKB) or 1 (not compatible to the CSKB) scores. Since the training relations in this benchmark differ from relations supported by the off-the-shelf COMET, we train our own COMET on its training set (see Appendix E.2 for more details) to make our compatibility test component applicable to this test set. This benchmark dataset covers all 12 relations in $\tilde{\mathcal{R}}$ as well as 6 more relations.
|
| 146 |
+
|
| 147 |
+
# 5.2 Baselines
|
| 148 |
+
|
| 149 |
+
We compare ACCENT with 5 baseline metrics: (1, 2) FED-understandable/appropriate (Mehri and Eskenazi, 2020) are two off-the-shelf baselines. "Understandable" and "Semantically Appropriate" are closer to commonsense compared to the rest of the criteria in FED. (3) Cross-encoder is a widely used model for sentence-pair regression tasks. We use BART (Lewis et al., 2020) as the backbone. (4) Cross-encoder (COMET) is a variant of (3) with COMET trained on $\mathrm{ATOMIC}_{20}^{20}$ as the back-
|
| 150 |
+
|
| 151 |
+
bone. (5) MLP regressor (Zhou et al., 2021) is trained with neural features from DialogGPT and symbolic features from ConceptNet (details in §7). The cross-encoders and the MLP regressor require event commonsense scores to train the model in an end-to-end manner. We use the annotated scores in DECO training set to train them, and split $20\%$ data for validation to conduct hyperparameter search.
|
| 152 |
+
|
| 153 |
+
For Setup 2, we consider the following baseline approaches: (1) ASER Extractor (Zhang et al., 2020b) first extracts events through patterns from dependency parsing and then uses a neural classifier to predict the relation. (2) CSKB Search (Zhou et al., 2021) searches the one-hop neighbors in $\mathrm{ATOMIC}_{20}^{20}$ through keyword matching.
|
| 154 |
+
|
| 155 |
+
For Setup 3, we consider 4 tuple scoring baselines. These baselines convert a tuple to an embedding and train a binary classifier to give a score: (1) BERT feeds $h, r, t$ to BERT and concatenates their [CLS] embeddings to get the tuple embedding. (2) BERTSAGE (Fang et al., 2021b) further concatenates the average embedding of the neighbors of $h$ and $t$ in an event knowledge graph. (3) KG-BERT (Yao et al., 2019) inputs "[CLS], $h$ , [SEP], $r$ , [SEP], $t$ " to get the tuple embedding. (4) KG-BERTSAGE (Fang et al., 2021a) further concatenates the average embedding of neighboring nodes. We use RoBERTa$_{\mathrm{LARGE}}$ (Liu et al., 2020) as the backbone, which has roughly the same parameter budget as COMET, to have a fair comparison.
|
| 156 |
+
|
| 157 |
+
The details of the baseline implementations are in Appendix E.1.
|
| 158 |
+
|
| 159 |
+
# 5.3 ACCENT Implementation
|
| 160 |
+
|
| 161 |
+
The proposed ACCENT framework is implemented using the Transformers library (Wolf et al., 2020). For event-relation extraction, we fine-tune T5-base for 50 epochs with the batch size of 4 and the learning rate of 5e-5. The training data comes from the human extracted tuples from DECO training set. We additionally select 5 negative samples (dialogues that do not have a certain relation) per relation from the training set and set their target output as "None" to guide the model to handle cases which do not contain a certain relation. During inference, if no tuple is extracted after considering all relations, we assign a score of 0.5 to the sample. For compatibility test, we use the off-the-shelf COMET model trained on
|
| 162 |
+
|
| 163 |
+
<table><tr><td></td><td colspan="2">DECO</td><td colspan="2">ConTurE</td></tr><tr><td></td><td>γ</td><td>ρ</td><td>γ</td><td>ρ</td></tr><tr><td>FED-appropriate</td><td>-0.16</td><td>-0.10</td><td>-0.09</td><td>-0.04</td></tr><tr><td>FED-understandable</td><td>-0.12</td><td>-0.07</td><td>-0.08</td><td>-0.04</td></tr><tr><td>Cross-encoder</td><td>0.15</td><td>0.15</td><td>-0.05</td><td>-0.09</td></tr><tr><td>Cross-encoder (COMET)</td><td>0.17</td><td>0.17</td><td>0.00</td><td>0.00</td></tr><tr><td>MLP Regressor</td><td>0.11</td><td>0.01</td><td>0.17</td><td>0.16</td></tr><tr><td>ACCENT (Ours)</td><td>0.30</td><td>0.30</td><td>0.21</td><td>0.22</td></tr></table>
|
| 164 |
+
|
| 165 |
+
Table 1: Pearson $(\gamma)$ and Spearman $(\rho)$ correlations between human judgments and different automatic evaluation metrics. The results for ACCENT are all significant $(p < 0.05)$ .
|
| 166 |
+
|
| 167 |
+
$\mathrm{ATOMIC}_{20}^{20}$ (Hwang et al., 2021) $^8$ . When querying COMET through generation, we use beam search with a beam size of 10 to get commonly sensible tail events. The embed(·) in Equation (1) is parameterized by paraphrase-MiniLM-L6-v2 provided in the Sentence-Transformers library $^9$ .
|
| 168 |
+
|
| 169 |
+
# 6 Results and Analysis
|
| 170 |
+
|
| 171 |
+
# 6.1 Metrics Performance
|
| 172 |
+
|
| 173 |
+
Table 1 shows the correlations between automatic scores and human annotations. ACCENT uniformly outperforms the baselines on both test sets. Specifically, off-the-shelf metrics ("FED-appropriate", "FED-understandable") perform poorly. For "Cross-encoder (COMET)", its results show that implicitly using the CSKB through transfer learning cannot yield satisfactory performance. Besides, cross-encoders fail to generalize well to ConTurE Subset whose dialogue histories are from human-machine dialogues. For "MLP Regressor", although it tries to utilize the CSKB explicitly, it is not as effective as ACCENT.
|
| 174 |
+
|
| 175 |
+
Some examples from the DECO test set and their event commonsense scores given by ACCENT are shown in Table 2. These scores are close to human judgments and enjoy great interpretability owing to the extracted event-relation tuples.
|
| 176 |
+
|
| 177 |
+
Apart from the sample-level correlation, we further examine whether ACCENT can reflect model performance in terms of event commonsense. Figure 6 shows the rankings of three dialogue systems used in DECO construction given by human and ACCENT. Human and ACCENT rank the three systems exactly the same, and the two sets of averaged scores highly correlate with each other.
|
| 178 |
+
|
| 179 |
+
<table><tr><td>Dialogue History</td><td>Target Response</td><td>Human</td><td>ACCENT</td><td>Extracted Tuple</td></tr><tr><td>Yes, I run them all the time!
|
| 180 |
+
How exciting, are you ready?</td><td>I'm ready for the marathon! So I'm going to
|
| 181 |
+
be running a half marathon in a few weeks.</td><td>0.80</td><td>0.61</td><td>(PersonX is ready for the marathon,
|
| 182 |
+
xEffect, PersonX run a half marathon)</td></tr><tr><td>I like pizza as long as it's a
|
| 183 |
+
meat lover. I hate vegetables.</td><td>Lol, yes it is! Great for you to eat so healthy!</td><td>0.53</td><td>0.45</td><td>(PersonX hate vegetables, xAttr,
|
| 184 |
+
PersonX be healthy)</td></tr><tr><td>I had an accident.</td><td>That's what I like to call the most beautiful
|
| 185 |
+
thing.</td><td>0.27</td><td>0.23</td><td>(PersonX has an accident, oWant, PersonY
|
| 186 |
+
wants to call the most beautiful thing)</td></tr></table>
|
| 187 |
+
|
| 188 |
+

|
| 189 |
+
Figure 6: Average event commonsense scores of generated responses of different models using human annotations (scale 1 to 5) and ACCENT automatic evaluation (scale 0 to 1). The rankings of systems given by human and ACCENT are the same.
|
| 190 |
+
|
| 191 |
+
Table 2: Examples of evaluation scores given by ACCENT from the DECO test set. Only $u_{n-1}$ in the dialogue history is shown, and the human annotations are normalized to the range of (0, 1) for better demonstration. "Extracted Tuple" column shows tuples extracted in ACCENT framework which account for the given scores.
|
| 192 |
+
|
| 193 |
+
<table><tr><td></td><td>P</td><td>R</td><td>F1</td><td>BLEU</td><td>BERTScore</td></tr><tr><td>CSKB Search</td><td>29.9</td><td>96.3</td><td>45.7</td><td>26.9</td><td>89.8</td></tr><tr><td>ASER Extractor</td><td>31.5</td><td>23.6</td><td>27.0</td><td>32.4</td><td>89.3</td></tr><tr><td>Ours</td><td>31.4</td><td>55.0</td><td>40.0</td><td>41.6</td><td>93.5</td></tr></table>
|
| 194 |
+
|
| 195 |
+
# 6.2 Tuple Extraction Performance
|
| 196 |
+
|
| 197 |
+
Table 3 shows the results of Setup 2 where we evaluate the event-relation extraction performance on DECO test set. Our proposed method achieves much higher BLEU and BERTScore than the two baselines, indicating that the composed events in tuples have reasonable quality. However, joint event-relation extraction remains challenging because it combines event extraction and relation identification. Although our proposed method achieves a higher F1 score than ASER Extractor, it still has plenty of room for improvement. As for CSKB Search, it usually returns a lot of tuples, thus resulting in high recall and very poor precision. Also, searching the CSKB is not applicable in our framework because this method can only return sensible tuples.
|
| 198 |
+
|
| 199 |
+
Table 3: Performances of different event-relation extraction methods on DECO test set. P: Precision. R: Recall.
|
| 200 |
+
|
| 201 |
+
<table><tr><td></td><td>Subset</td><td>All</td></tr><tr><td>BERT</td><td>62.0±0.3</td><td>61.5±0.3</td></tr><tr><td>BERTSAGE</td><td>55.8±0.7</td><td>55.8±0.7</td></tr><tr><td>KG-BERT</td><td>62.6±0.7</td><td>62.3±0.8</td></tr><tr><td>KG-BERTSAGE</td><td>63.2±0.4</td><td>62.9±0.3</td></tr><tr><td>Ours</td><td>68.0±0.8</td><td>67.6±0.8</td></tr></table>
|
| 202 |
+
|
| 203 |
+
Table 4: Test results on the CSKB compatibility benchmark. We report the overall AUC across all relations ("All") and the AUC across samples with our target relations ("Subset"). Both the averaged metric and its standard deviation are reported over 3 runs.
|
| 204 |
+
|
| 205 |
+
<table><tr><td colspan="2"></td><td>DECO</td><td>ConTurE</td></tr><tr><td colspan="2">ACCENT (whole)</td><td>0.30</td><td>0.22</td></tr><tr><td rowspan="4">I</td><td>ASER Extractor</td><td>0.14</td><td>0.00</td></tr><tr><td>w/o Pair</td><td>0.19</td><td>0.08</td></tr><tr><td>w/o Single</td><td>0.24</td><td>0.18</td></tr><tr><td>Gold Tuples</td><td>0.42</td><td>-</td></tr><tr><td rowspan="3">II</td><td>BERT</td><td>-0.08</td><td>0.09</td></tr><tr><td>KG-BERT</td><td>0.13</td><td>0.19</td></tr><tr><td>COMET (neural)</td><td>0.16</td><td>0.05</td></tr></table>
|
| 206 |
+
|
| 207 |
+
Table 5: Ablation results measured by Spearman correlation. I: Ablation of the event-relation extraction part. The gray row shows the results using human extracted tuples which provides an upper bound. II: Ablation of the compatibility test part of ACCENT.
|
| 208 |
+
|
| 209 |
+
# 6.3 Compatibility Test Performance
|
| 210 |
+
|
| 211 |
+
Table 4 depicts the test results on the benchmark dataset. Our method outperforms all baselines, and it does not require negative samples for training. The major difference between our method and those tuple scoring baselines is that we use the tuples in the existing CSKB to train a dynamic CSKB, i.e., COMET, instead of a discriminative model. We assume our strong results may be due to the generalization ability of the dynamic CSKB.
|
| 212 |
+
|
| 213 |
+
# 6.4 Ablation Studies
|
| 214 |
+
|
| 215 |
+
We conduct ablation studies to explore (1) whether the proposed event-relation extraction method can
|
| 216 |
+
|
| 217 |
+
<table><tr><td></td><td>STS Avg.</td><td>DECO</td><td>ConTurE</td></tr><tr><td>Sentence-BERT</td><td>79.82</td><td>0.30</td><td>0.22</td></tr><tr><td>DiffCSE$^{10}$</td><td>78.21</td><td>0.12</td><td>0.25</td></tr><tr><td>ESimCSE$^{11}$</td><td>77.44</td><td>0.19</td><td>0.24</td></tr><tr><td>Sup-SimCSE$^{12}$</td><td>82.52</td><td>0.31</td><td>0.26</td></tr></table>
|
| 218 |
+
|
| 219 |
+
Table 6: Results with different sentence embedding methods measured by Spearman correlation. Following Gao et al. (2021), we use the average results on the semantic textual similarity (STS) tasks to reflect the sentence embedding performance.
|
| 220 |
+
|
| 221 |
+
lead to better final metric performance; (2) given the automatically extracted tuples, whether the proposed compatibility test method can lead to higher correlation with human judgment.
|
| 222 |
+
|
| 223 |
+
To answer (1), we compare different methods to get the event-relation tuples (Part I in Table 5). Among the event-relation extraction baselines, we only consider ASER Extractor because CSKB search is not applicable in our framework as discussed in §6.2. Note that the event-relation extractor in ACCENT considers tuples in both "Single" and "Pair" settings to cover two potential types of errors (see §3.2). To verify this, we compare the variations of our proposed method where we only use tuples marked as "Single" or "Pair" for model training. Also, the human extracted tuples in DECO test set are used to provide an upper bound.
|
| 224 |
+
|
| 225 |
+
To answer (2), we fix the event-relation extraction part and change the compatibility test part (Part II in Table 5). We consider BERT and KG-BERT trained on the CSKB compatibility benchmark because they do not need event graph information and can be seamlessly applied to our compatibility test. Also, while we query COMET through tail generation, another intuitive design is using the model loss with “{h} {r} [GEN]” as the source and $t$ as the target to give scores. We map the loss to (0,1) through an exponential function, and name this alternative as “COMET (neural)” for it skips the symbolic decoding of $t_{gen}$ .
|
| 226 |
+
|
| 227 |
+
Table 5 demonstrates that the whole ACCENT gives the best result. Considering the variations of our design, "w/o Pair" gives much lower results, indicating that limiting the symbolic intermediate representation to only the information contained in the target response is not enough. This observation is in accord with our finding that some event commonsense errors occur when we take the dialogue history into account.
|
| 228 |
+
|
| 229 |
+
Another empirical discovery is that although "COMET (neural)" is a direct way of using the dynamic CSKB, its performance is poorer than what we propose in ACCENT. We assume that comparing $t$ and $t_{gen}$ in a symbolic fashion can yield more comparable scores among tuples with different relations (details in Appendix F).
|
| 230 |
+
|
| 231 |
+
In our implementation of ACCENT, the comparison of $t$ and $t_{gen}$ is done by calculating the cosine similarity between their Sentence-BERT embeddings. We further experiment with other sentence embedding methods based on contrastive learning. Specifically, we consider DiffCSE (Chuang et al., 2022), ESimCSE (Wu et al., 2022) which are two unsupervised contrastive learning frameworks for learning sentence embeddings. We also consider Sup-SimCSE (Gao et al., 2021) which leverages annotated natural language inference datasets by using "entailment" pairs as positives and "contradiction" pairs as hard negatives in the contrastive learning objective. As shown in Table 6, ACCENT can benefit from the improvement of the sentence embedding method, i.e., using Sup-SimCSE (Gao et al., 2021). We support both Sentence-BERT and Sup-SimCSE in our released ACCENT codebase.
|
| 232 |
+
|
| 233 |
+
# 6.5 Error Analysis
|
| 234 |
+
|
| 235 |
+
Since ACCENT is a pipeline framework, there is likely error propagation. In Section 6.4, we rule out the errors introduced by the event-relation extraction component by using human-extracted gold tuples. Results show that ACCENT with gold tuples (see "Gold Tuples" in Table 5) gives higher correlation with human judgment than "ACCENT (whole)" which uses the model-extracted tuples, indicating that ACCENT can benefit from high quality symbolic intermediate representation. We further include a qualitative analysis of the automatically extracted tuples in Appendix G, and believe improving the joint event-relation extraction is a worthwhile direction for future work.
|
| 236 |
+
|
| 237 |
+
# 7 Related Work
|
| 238 |
+
|
| 239 |
+
Automatic Open-Domain Dialogue Evaluation The evaluation of open-domain dialogue systems has long been a challenge due to the system's open-
|
| 240 |
+
|
| 241 |
+
ended goal (Huang et al., 2020), and simply scoring the overall quality is far from enough (Finch and Choi, 2020). Thus, researchers have decomposed the evaluation of open-domain dialogues into multiple facets and developed corresponding automatic evaluation metrics (Pang et al., 2020; Mehri and Eskenazi, 2020). While aspects like context coherence (Tao et al., 2018; Ghazarian et al., 2022b), diversity (Hashimoto et al., 2019), engagement (Ghazarian et al., 2020), have been systematically studied in the literature, the aspect of commonsense has long been neglected.
|
| 242 |
+
|
| 243 |
+
The closest related work is Zhou et al. (2021) which is mainly about commonsense-focused dialogues collection but also proposes an automatic metric for commonsense evaluation by training an MLP regressor on both symbolic and neural features. The symbolic features include the numbers of one-hop and two-hop triplets in ConceptNet (Speer et al., 2017) that can be found between the target response and its dialogue history. Although this metric utilizes the CSKB explicitly, it is limited to the direct search with surface form and only considers the number of triplets, and the CSKB used in the work is more about concepts but not event commonsense.
|
| 244 |
+
|
| 245 |
+
Joint Event-Relation Extraction While event extraction (Ahn, 2006) and relation identification (Do et al., 2011) are well-studied, how to jointly acquire them remains a challenge. We argue that joint event-relation extraction is an important problem because in practical use cases, the input is usually free-form text without pre-extracted events. Zhang et al. (2020b) is a pioneer work trying to jointly extract event and relation through a pipeline to automatically construct large knowledge graphs. Researchers in this work resort to rule-based methods for event extraction and train a classifier to predict the relation between a pair of events.
|
| 246 |
+
|
| 247 |
+
CSKB Compatibility CSKB population enlarges a CSKB automatically by adding new links or nodes which are compatible with the commonsense knowledge in the existing CSKB. In Fang et al. (2021a,b), researchers try to add events from a large event knowledge graph to a CSKB. The compatibility test component of ACCENT is relevant to the CSKB population task, and it is defined in a more general setting where the head and tail of the tuple can be arbitrary events.
|
| 248 |
+
|
| 249 |
+
# 8 Conclusion
|
| 250 |
+
|
| 251 |
+
We present ACCENT, an automatic evaluation metric for event commonsense evaluation of open-domain dialogue systems. We show that by using event-relation tuples as the symbolic intermediate representations, ACCENT can effectively utilize the CSKB and achieve a decent correlation with human judgments for dialogue commonsense evaluation.
|
| 252 |
+
|
| 253 |
+
# 9 Limitations
|
| 254 |
+
|
| 255 |
+
In this work, we conduct research on event commonsense of open-domain dialogue systems for the first time. While achieving higher correlations with human judgments than existing baselines, ACCENT has some limitations:
|
| 256 |
+
|
| 257 |
+
First, the ACCENT framework is based on a fixed set of event relations and the commonsense knowledge in $\mathrm{ATOMIC}_{20}^{20}$ which may fail to cover some potential event commonsense aspects. We believe augmenting the current framework with more commonsense resources is a worthwhile direction for the further improvement of ACCENT.
|
| 258 |
+
|
| 259 |
+
Second, the event-relation extractor in ACCENT framework is a T5 model fine-tuned in a low resource setting. Although the current model can yield fairly strong performance, it is an important research direction to improve the joint event-relation extraction component because the extracted tuples serve as the symbolic representation for commonsense reasoning in ACCENT framework. Since human extracted tuples are very costly to collect, we hope to explore whether we can improve this component through high-quality synthetic data construction or transfer learning in the future.
|
| 260 |
+
|
| 261 |
+
# 10 Acknowledgments
|
| 262 |
+
|
| 263 |
+
We thank the PlusLab members and the anonymous reviewers for their constructive feedback. This work is supported in part by the DARPA Machine Common Sense (MCS) program under Cooperative Agreement N66001-19-2-4032, and a Meta Sponsored research award.
|
| 264 |
+
|
| 265 |
+
# References
|
| 266 |
+
|
| 267 |
+
David Ahn. 2006. The stages of event extraction. In Proceedings of the Workshop on Annotating and Reasoning about Time and Events, pages 1-8, Sydney, Australia. Association for Computational Linguistics.
|
| 268 |
+
|
| 269 |
+
Siqi Bao, Huang He, Fan Wang, Hua Wu, Haifeng Wang, Wenquan Wu, Zhen Guo, Zhibin Liu, and Xinchao Xu. 2021. PLATO-2: Towards building an open-domain chatbot via curriculum learning. In *Findings of the Association for Computational Linguistics: ACL-IJCNLP* 2021, pages 2513–2525, Online. Association for Computational Linguistics.
|
| 270 |
+
Antoine Bosselut, Hannah Rashkin, Maarten Sap, Chaitanya Malaviya, Asli Celikyilmaz, and Yejin Choi. 2019. COMET: Commonsense transformers for automatic knowledge graph construction. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 4762-4779, Florence, Italy. Association for Computational Linguistics.
|
| 271 |
+
Hongshen Chen, Xiaorui Liu, Dawei Yin, and Jiliang Tang. 2017. A survey on dialogue systems: Recent advances and new frontiers. Acm Sigkdd Explorations Newsletter, 19(2):25-35.
|
| 272 |
+
Muhao Chen, Hongming Zhang, Qiang Ning, Manling Li, Heng Ji, Kathleen McKeown, and Dan Roth. 2021. Event-centric natural language processing. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing: Tutorial Abstracts, pages 6-14, Online. Association for Computational Linguistics.
|
| 273 |
+
Yung-Sung Chuang, Rumen Dangovski, Hongyin Luo, Yang Zhang, Shiyu Chang, Marin Soljacic, Shang-Wen Li, Scott Yih, Yoon Kim, and James Glass. 2022. DiffCSE: Difference-based contrastive learning for sentence embeddings. In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 4207-4218, Seattle, United States. Association for Computational Linguistics.
|
| 274 |
+
Quang Do, Yee Seng Chan, and Dan Roth. 2011. Minimally supervised event causality identification. In Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing, pages 294-303, Edinburgh, Scotland, UK. Association for Computational Linguistics.
|
| 275 |
+
Nouha Dziri, Ehsan Kamalloo, Kory W Mathewson, and Osmar Zaiane. 2018. Augmenting neural response generation with context-aware topical attention. arXiv preprint arXiv:1811.01063.
|
| 276 |
+
Tianqing Fang, Weiqi Wang, Sehyun Choi, Shibo Hao, Hongming Zhang, Yangqiu Song, and Bin He. 2021a. Benchmarking commonsense knowledge base population with an effective evaluation dataset. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 8949-8964, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
|
| 277 |
+
Tianqing Fang, Hongming Zhang, Weiqi Wang, Yangqiu Song, and Bin He. 2021b. Discos: Bridg-
|
| 278 |
+
|
| 279 |
+
ing the gap between discourse knowledge and commonsense knowledge. In Proceedings of the Web Conference 2021, pages 2648-2659.
|
| 280 |
+
Sarah E. Finch and Jinho D. Choi. 2020. Towards unified dialogue system evaluation: A comprehensive analysis of current evaluation protocols. In Proceedings of the 21th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pages 236-245, 1st virtual meeting. Association for Computational Linguistics.
|
| 281 |
+
Joseph L Fleiss. 1971. Measuring nominal scale agreement among many raters. Psychological bulletin, 76(5):378.
|
| 282 |
+
Brett M Frischmann. 2021. Common sense commons: The case of commonsensical social norms. Available at SSRN 3781955.
|
| 283 |
+
Tianyu Gao, Xingcheng Yao, and Danqi Chen. 2021. SimCSE: Simple contrastive learning of sentence embeddings. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 6894-6910, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
|
| 284 |
+
Sarik Ghazarian, Behnam Hedayatnia, Alexandros Papangelis, Yang Liu, and Dilek Hakkani-Tur. 2022a. What is wrong with you?: Leveraging user sentiment for automatic dialog evaluation. In Findings of the Association for Computational Linguistics: ACL 2022, pages 4194-4204, Dublin, Ireland. Association for Computational Linguistics.
|
| 285 |
+
Sarik Ghazarian, Zixi Liu, Tuhin Chakrabarty, Xuezhe Ma, Aram Galstyan, and Nanyun Peng. 2021. DiS-CoL: Toward engaging dialogue systems through conversational line guided response generation. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies: Demonstrations, pages 26-34, Online. Association for Computational Linguistics.
|
| 286 |
+
Sarik Ghazarian, Ralph Weischedel, Aram Galstyan, and Nanyun Peng. 2020. Predictive engagement: An efficient metric for automatic evaluation of open-domain dialogue systems. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 7789-7796.
|
| 287 |
+
Sarik Ghazarian, Nuan Wen, Aram Galstyan, and Nanyun Peng. 2022b. DEAM: Dialogue coherence evaluation using AMR-based semantic manipulations. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 771-785, Dublin, Ireland. Association for Computational Linguistics.
|
| 288 |
+
Karthik Gopalakrishnan, Behnam Hedayatnia, Qinlang Chen, Anna Gottardi, Sanjeev Kwatra, Anu Venkatesh, Raefer Gabriel, and Dilek Hakkani-Tur. 2019. Topical-Chat: Towards Knowledge-Grounded
|
| 289 |
+
|
| 290 |
+
Open-Domain Conversations. In Proc. Interspeech 2019, pages 1891-1895.
|
| 291 |
+
Tatsunori B. Hashimoto, Hugh Zhang, and Percy Liang. 2019. Unifying human and statistical evaluation for natural language generation. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 1689-1701, Minneapolis, Minnesota. Association for Computational Linguistics.
|
| 292 |
+
Minlie Huang, Xiaoyan Zhu, and Jianfeng Gao. 2020. Challenges in building intelligent open-domain dialog systems. ACM Transactions on Information Systems (TOIS), 38(3):1-32.
|
| 293 |
+
Jena D. Hwang, Chandra Bhagavatula, Ronan Le Bras, Jeff Da, Keisuke Sakaguchi, Antoine Bosselut, and Yejin Choi. 2021. Comet-atomic 2020: On symbolic and neural commonsense knowledge graphs. In AAAI.
|
| 294 |
+
Benjamin Kuipers. 1984. Commonsense reasoning about causality: deriving behavior from structure. Artificial intelligence, 24(1-3):169-203.
|
| 295 |
+
Yann LeCun. 2022. A path towards autonomous machine intelligence version 0.9. 2, 2022-06-27.
|
| 296 |
+
Douglas B Lenat. 1995. Cyc: A large-scale investment in knowledge infrastructure. Communications of the ACM, 38(11):33-38.
|
| 297 |
+
Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7871-7880, Online. Association for Computational Linguistics.
|
| 298 |
+
Yanran Li, Hui Su, Xiaoyu Shen, Wenjie Li, Ziqiang Cao, and Shuzi Niu. 2017. DailyDialog: A manually labelled multi-turn dialogue dataset. In Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 986-995, Taipei, Taiwan. Asian Federation of Natural Language Processing.
|
| 299 |
+
Hugo Liu and Push Singh. 2004. Conceptnet—a practical commonsense reasoning tool-kit. BT technology journal, 22(4):211-226.
|
| 300 |
+
Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2020. Ro{bert}a: A robustly optimized {bert} pretraining approach.
|
| 301 |
+
John McCarthy and Patrick J Hayes. 1981. Some philosophical problems from the standpoint of artificial intelligence. In Readings in artificial intelligence, pages 431-450. Elsevier.
|
| 302 |
+
|
| 303 |
+
Shikib Mehri and Maxine Eskenazi. 2020. Unsupervised evaluation of interactive dialog with DialoGPT. In Proceedings of the 21th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pages 225-235, 1st virtual meeting. Association for Computational Linguistics.
|
| 304 |
+
Nasrin Mostafazadeh, Aditya Kalyanpur, Lori Moon, David Buchanan, Lauren Berkowitz, Or Biran, and Jennifer Chu-Carroll. 2020. GLUCOSE: GeneraLized and COntextualized story explanations. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 4569-4586, Online. Association for Computational Linguistics.
|
| 305 |
+
Bo Pang, Erik Nijkamp, Wenjuan Han, Linqi Zhou, Yixian Liu, and Kewei Tu. 2020. Towards holistic and automatic evaluation of open-domain dialogue generation. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 3619-3629, Online. Association for Computational Linguistics.
|
| 306 |
+
Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pages 311-318, Philadelphia, Pennsylvania, USA. Association for Computational Linguistics.
|
| 307 |
+
Fabio Petroni, Tim Rocktäschel, Sebastian Riedel, Patrick Lewis, Anton Bakhtin, Yuxiang Wu, and Alexander Miller. 2019. Language models as knowledge bases? In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 2463-2473, Hong Kong, China. Association for Computational Linguistics.
|
| 308 |
+
James Pustejovsky, Patrick Hanks, Roser Sauri, Andrew See, Robert Gaizauskas, Andrea Setzer, Dragomir Radev, Beth Sundheim, David Day, Lisa Ferro, et al. 2003. The timebank corpus. In Corpus linguistics, volume 2003, page 40. Lancaster, UK.
|
| 309 |
+
Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. 2019. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9.
|
| 310 |
+
Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2020. Exploring the limits of transfer learning with a unified text-to-text transformer. Journal of Machine Learning Research, 21(140):1-67.
|
| 311 |
+
Hannah Rashkin, Maarten Sap, Emily Allaway, Noah A. Smith, and Yejin Choi. 2018. Event2Mind: Commonsense inference on events, intents, and reactions. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1:
|
| 312 |
+
|
| 313 |
+
Long Papers), pages 463-473, Melbourne, Australia. Association for Computational Linguistics.
|
| 314 |
+
Nils Reimers and Iryna Gurevych. 2019. Sentence-BERT: Sentence embeddings using Siamese BERT-networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 3982-3992, Hong Kong, China. Association for Computational Linguistics.
|
| 315 |
+
Maarten Sap, Ronan Le Bras, Emily Allaway, Chandra Bhagavatula, Nicholas Lourie, Hannah Rashkin, Brendan Roof, Noah A Smith, and Yejin Choi. 2019a. Atomic: An atlas of machine commonsense for if then reasoning. In Proceedings of the AAAI conference on artificial intelligence, volume 33, pages 3027-3035.
|
| 316 |
+
Maarten Sap, Hannah Rashkin, Derek Chen, Ronan Le Bras, and Yejin Choi. 2019b. Social IQa: Commonsense reasoning about social interactions. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 4463-4473, Hong Kong, China. Association for Computational Linguistics.
|
| 317 |
+
Maarten Sap, Vered Shwartz, Antoine Bosselut, Yejin Choi, and Dan Roth. 2020. Commonsense reasoning for natural language processing. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: Tutorial Abstracts, pages 27-33, Online. Association for Computational Linguistics.
|
| 318 |
+
Robyn Speer, Joshua Chin, and Catherine Havasi. 2017. ConceptNet 5.5: An open multilingual graph of general knowledge. In *Thirty-first AAAI conference on artificial intelligence*.
|
| 319 |
+
Leonard Talmy. 1988. Force dynamics in language and cognition. Cognitive science, 12(1):49-100.
|
| 320 |
+
Chongyang Tao, Lili Mou, Dongyan Zhao, and Rui Yan. 2018. RUBER: An unsupervised method for automatic evaluation of open-domain dialog systems. In Thirty-Second AAAI Conference on Artificial Intelligence.
|
| 321 |
+
Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Remi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander Rush. 2020. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Association for Computational Linguistics.
|
| 322 |
+
|
| 323 |
+
Xing Wu, Chaochen Gao, Liangjun Zang, Jizhong Han, Zhongyuan Wang, and Songlin Hu. 2022. ESim-CSE: Enhanced sample building method for contrastive learning of unsupervised sentence embedding. In Proceedings of the 29th International Conference on Computational Linguistics, pages 3898-3907, Gyeongju, Republic of Korea. International Committee on Computational Linguistics.
|
| 324 |
+
Liang Yao, Chengsheng Mao, and Yuan Luo. 2019. KG-BERT: BERT for knowledge graph completion. arXiv preprint arXiv:1909.03193.
|
| 325 |
+
Hongming Zhang, Daniel Khashabi, Yangqiu Song, and Dan Roth. 2020a. TransOMCS: From linguistic graphs to commonsense knowledge. In Proceedings of the Twenty-Ninth International Joint Conference on Artificial Intelligence, IJCAI-20, pages 4004-4010. International Joint Conferences on Artificial Intelligence Organization. Main track.
|
| 326 |
+
Hongming Zhang, Xin Liu, Haojie Pan, Yangqiu Song, and Cane Wing-Ki Leung. 2020b. ASER: A large-scale eventuality knowledge graph. In WWW, pages 201-211.
|
| 327 |
+
Saizheng Zhang, Emily Dinan, Jack Urbanek, Arthur Szlam, Douwe Kiela, and Jason Weston. 2018. Personalizing dialogue agents: I have a dog, do you have pets too? In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2204-2213, Melbourne, Australia. Association for Computational Linguistics.
|
| 328 |
+
Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav Artzi. 2020c. *Bertscore: Evaluating text generation with bert*. In *International Conference on Learning Representations*.
|
| 329 |
+
Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, and Bill Dolan. 2020d. DIALOGPT: Large-scale generative pre-training for conversational response generation. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: System Demonstrations, pages 270-278, Online. Association for Computational Linguistics.
|
| 330 |
+
Pei Zhou, Karthik Gopalakrishnan, Behnam Hedayatnia, Seokhwan Kim, Jay Pujara, Xiang Ren, Yang Liu, and Dilek Hakkani-Tur. 2021. Commonsense-focused dialogues for response generation: An empirical study. In Proceedings of the 22nd Annual Meeting of the Special Interest Group on Discourse and Dialogue, pages 121-132, Singapore and Online. Association for Computational Linguistics.
|
| 331 |
+
|
| 332 |
+
# A Commonsense Knowledge Bases
|
| 333 |
+
|
| 334 |
+
To endow machines with commonsense reasoning abilities, a growing number of CSKBs are developed through human annotation and information extraction. From earlier CSKBs, ConceptNet (Liu and Singh, 2004; Speer et al., 2017) focuses more on taxonomic (e.g., "IsA") and lexical (e.g., "Synonym", "RelatedTo") knowledge; TransOMCS (Zhang et al., 2020a) automates the knowledge base construction by leveraging the same limited set of relations defined in ConceptNet.
|
| 335 |
+
|
| 336 |
+
Recent CSKBs give more focus on event commonsense knowledge. In this work, we select $\mathrm{ATOMIC}_{20}^{20}$ (Hwang et al., 2021) as the knowledge source of ACCENT framework because it is a comprehensive CSKB with rich knowledge regarding how events and human actions are associated with each other. For comparison, ATOMIC (Sap et al., 2019a), as the pioneer of $\mathrm{ATOMIC}_{20}^{20}$ , consists of only nine relations and therefore poses limitations. Another recent event-centric CSKB is GLUCOSE (Mostafazadeh et al., 2020), which however focuses on a specific part of event commonsense (mostly on causal inference) and is less comprehensive and suitable for our work.
|
| 337 |
+
|
| 338 |
+
# B Pseudo Code of ACCENT
|
| 339 |
+
|
| 340 |
+
In §3, we introduce the symbolic intermediate representation and the two components in ACCENT. Algorithm 1 displays the skeleton of the whole framework.
|
| 341 |
+
|
| 342 |
+
Line 3-9 in Algorithm 1 show the joint event-relation extraction in ACCENT. We query the event-relation extraction model $\mathcal{M}$ with prompts for each relation. The head and tail events can be parsed from the generated result if it is not "None" and follows the pre-defined format, i.e., "event1: {head}; event2: {tail}". Line 16-21 in Algorithm 1 show the compatibility test in ACCENT. Each tuple is given a score based on the maximum cosine similarity between its tail and the commonsense tails obtained from the dynamic CSKB $\mathcal{C}$ . After calculating scores for each extracted tuple, we average them to get the event commonsense score for the target utterance (Line 24 in Algorithm 1).
|
| 343 |
+
|
| 344 |
+
# C Event Relations
|
| 345 |
+
|
| 346 |
+
As introduced in §3.1, ACCENT selects relations from $\mathrm{ATOMIC}_{20}^{20}$ which are related to event commonsense. These event relations can help cover
|
| 347 |
+
|
| 348 |
+
Algorithm 1: ACCENT framework.
|
| 349 |
+
Input: Dialogue history $h$, target utterance $u$, prompt dict $\mathcal{P}$, extractor $\mathcal{M}$, dynamic CSKB $\mathcal{C}$, sentence embedder $E$. Output: Event commonsense score $s$.
1 tuples $\leftarrow []$
|
| 350 |
+
2 // Event-relation extraction.
|
| 351 |
+
3 foreach (rel, prompt) in $\mathcal{P}$ do
4   raw_output $\leftarrow$ generate($\mathcal{M}$, $h$, $u$, prompt)
5   if check_format(raw_output) then
6     (head, tail) $\leftarrow$ parse(raw_output)
7     tuples.append((head, rel, tail))
8   end
|
| 352 |
+
9 end
|
| 353 |
+
10 // Compatibility test.
|
| 354 |
+
11 if is_empty(tuples) then return 0.5
|
| 355 |
+
13 else
14   tupleScores $\leftarrow []$
|
| 356 |
+
15 foreach (head, rel, tail) in tuples do
16   score $\leftarrow 0$
17   cs_tails $\leftarrow$ query($\mathcal{C}$, head, rel)
18   foreach cs_tail in cs_tails do
19     x $\leftarrow$ cos($E$(tail), $E$(cs_tail))
20     score $\leftarrow$ max(score, x)
21   end
22   tupleScores.append(score)
|
| 357 |
+
23 end
|
| 358 |
+
24 return average(tupleScores)
|
| 359 |
+
25 end
|
| 360 |
+
|
| 361 |
+
different types of commonsense violations in event commonsense evaluation of open-domain dialogues. Table 7 shows examples in DECO where the system response violates event commonsense in terms of different types of event relations.
|
| 362 |
+
|
| 363 |
+
Note that although "Cause" and "xReason" in $\mathrm{ATOMIC}_{20}^{20}$ are also related to event commonsense, we exclude them from the selected subset $\tilde{\mathcal{R}}$ . This is because the cause-effect relation can be covered by "xEffect"/"oEffect" and tuples with "Cause" and "xReason" relations take up less than $0.1\%$ of $\mathrm{ATOMIC}_{20}^{20}$ . Moreover, we exclude "IsBefore" because a tuple with "IsBefore" relation can be equivalently converted to a tuple with "IsAfter" relation by switching the head and tail. As shown in Table 8, for each relation in $\tilde{\mathcal{R}}$ , a prompt is manually designed to explain its semantic meaning. These designed prompts give more hints to the pretrained model and allow a single model to extract tuples for different relations.
|
| 364 |
+
|
| 365 |
+
# D Additional Details of Data Collection
|
| 366 |
+
|
| 367 |
+
# D.1 Quality Control
|
| 368 |
+
|
| 369 |
+
To ensure the annotators have a good understanding of event and event commonsense, we restrict the annotators to those from English-speaking countries who have finished at least 5,000 HITs with an acceptance rate $>97\%$ . The compensation rate for annotators is calculated using a per-hour wage of \$16.
|
| 370 |
+
|
| 371 |
+
For commonsense scoring (see §4.3), we requested 3 annotators to score each sample, and we instructed them to specifically consider events and their relations in the dialogue to give the event commonsense score. Figure 7 shows the annotation guideline we used for event commonsense scoring. We also set a sample for attention check in each HIT. HITs that failed the check were reassigned to other annotators.
|
| 372 |
+
|
| 373 |
+
For tuple extraction (see §4.2), we conducted a training round before the large-scale annotation and 8 annotators proceeded to the final round. Each HIT in this task was assigned to 2 individual annotators. The template used to collect event-relation tuples is shown in Figure 8. When validating the extracted tuples, 3 annotators judged each tuple, and we achieved Fleiss' Kappa (Fleiss, 1971) $\kappa = 0.491$ (moderate agreement). Tuples marked
|
| 374 |
+
|
| 375 |
+
as invalid by the majority vote are not included in the final dataset.
|
| 376 |
+
|
| 377 |
+
# D.2 Dataset Statistics
|
| 378 |
+
|
| 379 |
+
Table 9 gives the statistics of DECO and ConTurE Subset. Although machine generated responses in DECO are given by advanced open-domain dialogue systems, some event commonsense errors still exist. For ConTurE Subset, we use it to test the generalization ability of different metrics. Table 10 gives the numbers of human extracted event-relation tuples. Note that the test set of DECO is thoroughly annotated (we consider every relation on each sample) to provide a test set for the joint event-relation extraction task. All the data we collected are in English.
|
| 380 |
+
|
| 381 |
+
# D.3 License
|
| 382 |
+
|
| 383 |
+
The development of DECO and ConTurE Subset is based on the dialogues coming from DailyDialog, PersonaChat, TopicalChat, and ConTurE. PersonaChat $^{14}$ , TopicalChat $^{15}$ and ConTurE $^{16}$ are licensed. We ensure that we did not violate any license conditions when developing our datasets.
|
| 384 |
+
|
| 385 |
+
# E Additional Details of Experiment
|
| 386 |
+
|
| 387 |
+
This section describes more details about baseline implementation, applying ACCENT to the CSKB benchmark, and computational resources. The implementation details of the proposed ACCENT framework are discussed in §5.3.
|
| 388 |
+
|
| 389 |
+
# E.1 Baseline Implementation
|
| 390 |
+
|
| 391 |
+
We compare ACCENT with 5 baseline metrics on event commonsense evaluation. All the metrics are tested on DECO test set and ConTurE Subset. For metrics which require training, the training set of DECO is used, and we split $20\%$ data for validation. The implementation details are as follows:
|
| 392 |
+
|
| 393 |
+
- FED-understandable/appropriate: We use their released model<sup>17</sup>.
|
| 394 |
+
- Cross-encoder: We use the cross-encoder with a regression head implemented in the Sentence-Transformers library (Reimers and
|
| 395 |
+
|
| 396 |
+
<table><tr><td>Relation</td><td>Negative Example</td><td>Event-Relation Tuple</td></tr><tr><td>xIntent Motivation</td><td>A: Stay around for a while. The party is warming up.
|
| 397 |
+
B: We'll need to get you some ice cream, you know, to warm up your body.</td><td>(PersonX gets PersonY some ice cream, xIntent, PersonX warms up PersonY's body)</td></tr><tr><td>xNeed Need</td><td>A: I boated around the world with my husband when we were younger.
|
| 398 |
+
B: I love boating. I also like to paint, I just need an oxygen mask. I need a life.</td><td>(PersonX loves boating, xNeed, PersonX needs an oxygen mask), (PersonX likes to paint, xNeed, PersonX needs an oxygen mask)</td></tr><tr><td>xReact, oReact Reaction</td><td>A: That is funny! At work they make me wear a uniform, boohoo!
|
| 399 |
+
B: That is unfortunate, I actually lost my arm from a car accident so I do not have to.
|
| 400 |
+
A: That is interesting! Do you know Apple has anything to say on that?</td><td>(PersonX loses PersonX's arm from a car accident, oReact, PersonY feels interesting)</td></tr><tr><td>xWant, oWant Want</td><td>A: We don't give bonus every month, but we offer a semi-annual bonus. And you will receive two weeks paid vacation a year, as well. Does it suit you?
|
| 401 |
+
B: Yes, thank you. May I ask for an apartment?
|
| 402 |
+
A: No... I want to take your word on that one! It'll be all I need:)</td><td>(PersonX asks for an apartment, oWant, PersonY wants to take PersonX's word)</td></tr><tr><td>xAttr Description</td><td>A: Are you a vegetarian? I am.
|
| 403 |
+
B: Yes I am. I do not like meat.
|
| 404 |
+
A: I'm a vegetarian and I love meat.</td><td>(PersonX loves meat, xAttr, PersonX be a vegetarian)</td></tr><tr><td>xEffect, oEffect Cause-Effect</td><td>A: How you celebrate your Valentine's Day with your wife?
|
| 405 |
+
B: I am not sure about you, but my wife is not into Valentine's day ... So we celebrate a lot.</td><td>(PersonX be not into Valentine's day, xEffect, PersonX celebrates a lot)</td></tr><tr><td>HinderedBy Constraint</td><td>A: My mom does not bake, she does not even cook.
|
| 406 |
+
B: My mom used to cook for my family, but I think my mom's got too big to cook anything for anymore.</td><td>(PersonX cooks for family, HinderedBy, PersonX gets too big)</td></tr><tr><td>IsAfter Temporal</td><td>A: Marco has fallen off a ladder. I think he's hurt his back. What shall we do?
|
| 407 |
+
B: Marco is still on the ladder, it just got knocked over. Marco will not get any sleep.</td><td>(PersonX be on the ladder, isAfter, PersonX gets knocked over)</td></tr><tr><td>HasSubEvent Parent-Child</td><td>A: Yeah he was an internal medicine practitioner before he turned to comedy so he attended to the woman until medics arrived.
|
| 408 |
+
B: Ohhh I see. I thought he was in the audience when he was having the seizure.</td><td>(PersonX attends to the woman, HasSubEvent, PersonX has the seizure)</td></tr></table>
|
| 409 |
+
|
| 410 |
+
Table 7: Event relations with corresponding negative examples in DECO. $\square$ denotes responses generated by open-domain dialogue systems. Each example contains events (highlighted with green and yellow) which violate event commonsense in terms of the corresponding event relation. Such event commonsense errors can be captured by nonsensical event-relation tuples.
|
| 411 |
+
|
| 412 |
+
<table><tr><td>Relation</td><td>Semantic Meaning</td><td>Designed Prompt (Extract event1 and event2 from the text where ...)</td></tr><tr><td>xIntent</td><td>because PersonX wanted</td><td>event2 shows PersonX's intent for event1.</td></tr><tr><td>xNeed</td><td>but before, PersonX needed</td><td>event2 needs to be true for event1 to take place.</td></tr><tr><td>xReact</td><td>as a result, PersonX feels</td><td>event2 shows how PersonX reacts to event1.</td></tr><tr><td>oReact</td><td>as a result, Y or others feels</td><td>event2 shows how PersonY reacts to event1.</td></tr><tr><td>xWant</td><td>as a result, PersonX wants</td><td>event2 shows what PersonX wants after event1 happens.</td></tr><tr><td>oWant</td><td>as a result, Y or others wants</td><td>event2 shows what PersonY wants after event1 happens.</td></tr><tr><td>xAttr</td><td>X is seen as</td><td>event2 shows how PersonX is viewed as after event1.</td></tr><tr><td>xEffect</td><td>as a result, PersonX will</td><td>event2 shows the effect of event1 on PersonX.</td></tr><tr><td>oEffect</td><td>as a result, Y or others will</td><td>event2 shows the effect of event1 on PersonY.</td></tr><tr><td>HinderedBy</td><td>can be hindered by</td><td>event1 fails to happen because event2.</td></tr><tr><td>IsAfter</td><td>happens after</td><td>event1 happens after event2.</td></tr><tr><td>HasSubEvent</td><td>includes the event/action</td><td>event1 includes event2.</td></tr></table>
|
| 413 |
+
|
| 414 |
+
Table 8: Semantic meanings and designed prompts for the selected $\mathrm{ATOMIC}_{20}^{20}$ relations. The semantic meanings are from Hwang et al. (2021).
|
| 415 |
+
|
| 416 |
+
In this survey, we are specifically interested in the event commonsense. The core of event commonsense concerns the events and the relations between them. So you need to focus on the events in the response or its history and whether they are sensibly related to each other.
|
| 417 |
+
|
| 418 |
+
Hint More explanation for Event and Relation:
|
| 419 |
+
|
| 420 |
+
# Event
|
| 421 |
+
|
| 422 |
+
An event is something that happens (e.g., actions) or possesses a certain state (e.g., properties). We can usually describe an event through a verb-centric phrase. For example, in "I have participated in the 3000-meter race so I feel so tired now.", we can find two events, I participate in the 3000-meter race and I feel tired.
|
| 423 |
+
|
| 424 |
+
# Relation
|
| 425 |
+
|
| 426 |
+
Relation is about how one event relates to another event. In the former example, there exists a relation of "Effect" - "I feel tired" is the effect of "I participate in the 3000-meter race". Common relations in event commonsense are
|
| 427 |
+
|
| 428 |
+
- Motivation: A person does one event because he / she hopes to achieve another event.
|
| 429 |
+
- Prerequisite: A person needs one event to make another event happen.
|
| 430 |
+
- Constraint: One event may fail to happen because of another event.
|
| 431 |
+
- Effect: One event be the cause and another event be the effect.
|
| 432 |
+
- Reaction: One event (usually about the feeling or emotion) is the reaction to another event.
|
| 433 |
+
- Desire: One event happens, which triggers a person to want another event to happen.
|
| 434 |
+
- Description: One event can be described as another event.
|
| 435 |
+
- Temporal Relations: Two events have temporal order, like one event happens after another event.
|
| 436 |
+
|
| 437 |
+
More examples can be found below.
|
| 438 |
+
|
| 439 |
+
A good way to check the event commonsense plausibility of a response is to think about the events in the response and check whether their relations are commonsensical. For example,
|
| 440 |
+
|
| 441 |
+
Dialogue History:
|
| 442 |
+
|
| 443 |
+
A: Is there any clue?
|
| 444 |
+
|
| 445 |
+
# Response:
|
| 446 |
+
|
| 447 |
+
B: If I had a clue, I'd be happy to help.
|
| 448 |
+
|
| 449 |
+
In the response, there are two events, "have a clue" and "be happy to help". From "If I ... I would ..." we can know the response assumes there is a relation of "Prerequisite" (you may also think of it as a temporal relation). This relation is sensible according to our commonsense. So, we think this response is absolutely commonsensical regarding the event commonsense.
|
| 450 |
+
|
| 451 |
+
Since the response is highly related to its previous utterance, you also need to consider them together. Similarly, you can think about the events in the previous utterance and check their relations to the events you have thought of in the response.
|
| 452 |
+
|
| 453 |
+
We provide more examples below. You may look at them if you are still not clear about how to evaluate. (Click "More Examples (click to expand/collapse)" below to see them!)
|
| 454 |
+
|
| 455 |
+
Figure 7: Annotation guideline for event commonsense scoring.
|
| 456 |
+
|
| 457 |
+
<table><tr><td>Dataset</td><td>Size</td><td>Average score</td><td>Average # tokens in target response</td></tr><tr><td>DECO</td><td>300</td><td>3.39</td><td>17.6</td></tr><tr><td>ConTurE Subset</td><td>100</td><td>2.88</td><td>15.7</td></tr></table>
|
| 458 |
+
|
| 459 |
+
Table 9: Statistics of collected dialogue in event commonsense evaluation datasets.
|
| 460 |
+
|
| 461 |
+
Gurevych, 2019). We use BART as the backbone model to align with the COMET model used in our method. For hyperparameter search, we fine-tune the cross-encoder for 10 epochs with the batch size of 8 and the learning rate from $\{1\mathrm{e} - 5,3\mathrm{e} - 5,5\mathrm{e} - 5\}$
|
| 462 |
+
|
| 463 |
+
- Cross-encoder (COMET): We use the off-the-shelf COMET model trained on $\mathrm{ATOMIC}_{20}^{20}$<sup>18</sup> as the backbone model. Other implementation details are the same with Cross-encoder.
|
| 464 |
+
- MLP regressor: Since the code of Zhou et al. (2021) is not publicly available, we produce the results using our own implementation based on scikit-learn<sup>19</sup>. Our implementation is available in our released codebase.
|
| 465 |
+
|
| 466 |
+
For event-relation extraction baselines, the im
|
| 467 |
+
|
| 468 |
+
<table><tr><td></td><td>xIntent</td><td>xNeed</td><td>xReact</td><td>oReact</td><td>xWant</td><td>oWant</td><td>xAttr</td><td>xEffect</td><td>oEffect</td><td>HinderedBy</td><td>IsAfter</td><td>HasSubEvent</td></tr><tr><td>Train (few-shot)</td><td>20</td><td>20</td><td>24</td><td>30</td><td>22</td><td>22</td><td>25</td><td>37</td><td>22</td><td>29</td><td>26</td><td>30</td></tr><tr><td>Test</td><td>37</td><td>35</td><td>21</td><td>50</td><td>29</td><td>20</td><td>46</td><td>71</td><td>32</td><td>20</td><td>61</td><td>45</td></tr></table>
|
| 469 |
+
|
| 470 |
+
Table 10: Statistics of the collected event-relation tuples. We collect around 30 tuples for each relation from DECO training set to train the event-relation extractor in the few-shot learning setting. The test set of DECO is thoroughly annotated to provide a test set for the joint event-relation extraction task.
|
| 471 |
+
|
| 472 |
+
plementation details are as follows:
|
| 473 |
+
|
| 474 |
+
- ASER Extractor: We use their provided code $^{20}$ to extract events. The neural classifier for relation prediction is trained on the human annotated tuples in DECO training set.
|
| 475 |
+
- CSKB Search: We search tuples related to the target response and its previous response in $\mathrm{ATOMIC}_{20}^{20}$ following the CSKB search pipeline described in Zhou et al. (2021). A potential concept set is built from the utterances by identifying nouns, verbs, and adjectives that are not stopwords through part-of-speech (POS) tagging and lemmatizing them. We return tuples whose head and tail both contain words in the concept set as the search result.
|
| 476 |
+
|
| 477 |
+
For CSKB population baselines, we use the implementation in Fang et al. (2021a) $^{21}$ . For the backbone model, we use RoBERTa $_{\text{LARGE}}$ which has roughly the same parameter budget with COMET. We train all models for 1 epoch with the batch size of 64. The learning rate is searched from $\{1\mathrm{e} - 7, 1\mathrm{e} - 5, 1\mathrm{e} - 3\}$ on the validation set.
|
| 478 |
+
|
| 479 |
+
# E.2 Applying ACCENT to CSKB Benchmark
|
| 480 |
+
|
| 481 |
+
In §5 Setup 3, we apply the compatibility test approach in ACCENT to a CSKB benchmark. Such an application is seamless because the compatibility test also assigns a score to each tuple, and tuples which receive higher compatibility scores are naturally more suitable to populate the CSKB. We train the COMET model on the positive samples in the training set of the benchmark dataset for 1 epoch with the batch size of 64. The learning rate is searched from $\{1\mathrm{e} - 7,1\mathrm{e} - 5,1\mathrm{e} - 3\}$ on the validation set. Note that our approach does not require any negative sample in the training stage. It also does not need the event graph information provided in the benchmark dataset, but results in Table 4 show
|
| 482 |
+
|
| 483 |
+
<table><tr><td></td><td>Pearson</td><td>Spearman</td></tr><tr><td>COMET (neural)</td><td>0.14</td><td>0.25</td></tr><tr><td>ACCENT approach</td><td>0.40</td><td>0.42</td></tr></table>
|
| 484 |
+
|
| 485 |
+
Table 11: Correlations between human judgments and different compatibility test approaches with human-extracted tuples on DECO test set.
|
| 486 |
+
|
| 487 |
+
that our method outperforms baselines which require manually created negative data and take in graph information.
|
| 488 |
+
|
| 489 |
+
# E.3 Computational Resources
|
| 490 |
+
|
| 491 |
+
We run BERTSAGE and KG-BERTSAGE for the CSKB benchmark experiments on a single Nvidia V100 GPU with 32 GB memory where these models require large memory consumption in the run time. The rest of experiments is done on a single Nvidia A10 GPU with 24 GB memory.
|
| 492 |
+
|
| 493 |
+
Note that although we develop the ACCENT framework based on large language models, the only part which requires training is the T5 model (with 223M parameters) for event-relation extraction. As discussed in §3.2, the model is fine-tuned in a low resource setting and the training takes less than 0.5 GPU hour.
|
| 494 |
+
|
| 495 |
+
# F More Discussion of the Compatibility Test Approach in ACCENT
|
| 496 |
+
|
| 497 |
+
ACCENT checks whether an event-relation tuple $(h, r, t)$ is compatible with the commonsense knowledge by comparing the similarity between $t$ and commonsense tails generated by the Dynamic CSKB (COMET). Ablation results in Table 5 show that the compatibility test approach in ACCENT yields better performance than the "COMET (neural)" alternative which also uses the COMET model. To exclude the potential noise introduced by the automatically extracted tuples, we further compare these two methods using human-extracted tuples on DECO test set. Results in Table 11 demonstrate that the conclusion still holds under this experimental setting. Table 12 gives
|
| 498 |
+
|
| 499 |
+
<table><tr><td>Dialogue</td><td>Tuple</td><td>Human</td><td>COMET (neural)</td><td>ACCENT approach</td></tr><tr><td rowspan="2">A: I work in the bakery and eat all my favorite cupcakes. What do you do?</td><td>(PersonX makes a mistake, xEffect, PersonX gets fired)</td><td>/</td><td>0.33</td><td>0.63</td></tr><tr><td>(PersonX gets fired, isAfter, PersonX makes mistake)</td><td>/</td><td>0.12</td><td>0.68</td></tr><tr><td rowspan="2">B: I actually just got fired for a mistake I made.</td><td>(PersonX gets fired, HasSubEvent, PersonX makes mistake)</td><td>/</td><td>0.18</td><td>0.66</td></tr><tr><td>Average</td><td>0.80</td><td>0.21</td><td>0.66</td></tr><tr><td rowspan="2">A: Yeah winter is coming soon. It gonna be really cold.</td><td>(PersonX wants to live in a cold place, xIntent, PersonX intends to go full on winter)</td><td>/</td><td>0.06</td><td>0.59</td></tr><tr><td>(PersonX goes full on winter, xNeed, PersonX lives in cold place)</td><td>/</td><td>0.53</td><td>0.39</td></tr><tr><td rowspan="3">B: I know I know. I want to live in a cold place before I go full on winter.</td><td>(PersonX goes full on winter, isAfter, PersonX lives in a cold place)</td><td>/</td><td>0.95</td><td>0.55</td></tr><tr><td>(PersonX knows winter is coming, HasSubEvent, PersonX wants to live in a cold place)</td><td>/</td><td>1.63</td><td>0.69</td></tr><tr><td>Average</td><td>0.40</td><td>0.79</td><td>0.56</td></tr></table>
|
| 500 |
+
|
| 501 |
+
Table 12: Examples of results given by different compatibility test approaches. Only $u_{n-1}$ in the dialogue history is shown. Human annotations are normalized to the range of (0, 1) and scores given by "COMET (neural)" are scaled up by 100 times for better demonstration. Compatibility scores for each tuple are averaged to get the final score for the target response (in bold font).
|
| 502 |
+
|
| 503 |
+
two samples with a breakdown of tuple results. Compared with the compatibility scores given by ACCENT approach, the scores given by "COMET (neural)" are less comparable among tuples with different relations, thus making this method unsuitable for ACCENT framework.
|
| 504 |
+
|
| 505 |
+
# G Error Analysis
|
| 506 |
+
|
| 507 |
+
We conduct a qualitative analysis of the event-relation component in ACCENT. Table 13 shows some examples of the extracted tuples. While most of the head and tail events are nicely composed and capture the major information in the given text, they are not perfect. Multiple participants involved in the dialogue further increase the difficulty of the task. We note that the model sometimes confuses the multiple participants in the dialogue and makes mistakes when using "PersonX" or "PersonY". For example, in the third sample of Table 13, the model confuses different participants since the subjects of "raise money" and "go tomorrow for treatment" should be different. Such confusion will lead to the wrong tuples which cannot truly reflect the meaning of the dialogue. Also, identifying relation from the given dialogue is challenging. Although we include negative samples (dialogues that do not have a certain relation) when fine-tuning T5, errors still exist (e.g., the tuple with "xAttr" relation in the third sample of Table 13).
|
| 508 |
+
|
| 509 |
+
# Automatically extracted event-relation tuples
|
| 510 |
+
|
| 511 |
+
Dialogue History:
|
| 512 |
+
|
| 513 |
+
A: Cool. Are you religious? I have not told my parents I am wiccan yet.
|
| 514 |
+
|
| 515 |
+
B: I am the wife of a pastor. Spirituality is important in our home.
|
| 516 |
+
|
| 517 |
+
A: Oh. I grew up in a un religious home.
|
| 518 |
+
|
| 519 |
+
B: God is always around, I hope he does not mind that I do not like cleaning.
|
| 520 |
+
|
| 521 |
+
Target Response:
|
| 522 |
+
|
| 523 |
+
A: I'm not religious. I'm a Christian.
|
| 524 |
+
|
| 525 |
+
(PersonX not be religious, xAttr, PersonX be a Christian), (PersonX grew up in un religious home,xEffect, PersonX be a Christian), (PersonX like cleaning, isAfter, PersonX be a Christian), (PersonX like cleaning, HasSubEvent, PersonX be a Christian), (PersonX like cleaning, HinderedBy, PersonX be a Christian)
|
| 526 |
+
|
| 527 |
+
Dialogue History:
|
| 528 |
+
|
| 529 |
+
A: I'm dreadfully sorry for you.
|
| 530 |
+
|
| 531 |
+
B: Thank you very much for your kindness.
|
| 532 |
+
|
| 533 |
+
A: Is there any clue?
|
| 534 |
+
|
| 535 |
+
Target Response:
|
| 536 |
+
|
| 537 |
+
B: If I had a clue, I'd be happy to help.
|
| 538 |
+
|
| 539 |
+
(PersonX have a clue, xAttr, PersonX be happy to help), (PersonX has a clue, xReact, PersonX be happy to help), (PersonX has a clue, xEffect, PersonX be happy to help), (PersonX asks PersonY for a clue, oEffect, PersonY is happy to help), (PersonX help PersonY, HinderedBy, PersonX have a clue)
|
| 540 |
+
|
| 541 |
+
Dialogue History:
|
| 542 |
+
|
| 543 |
+
A: you mean if they catch the murderer. I'm scared stiff about going into the city center at night.
|
| 544 |
+
|
| 545 |
+
B: There must have been some good news in the newspaper. I can't believe that none of the news stories make you happy or excited.
|
| 546 |
+
|
| 547 |
+
A: Well, there was one good piece of news. You remember the local girl who was dying of a rare blood disease?
|
| 548 |
+
|
| 549 |
+
B: Yes. Her parents were raising money to have her treated in the united states.
|
| 550 |
+
|
| 551 |
+
Target Response:
|
| 552 |
+
|
| 553 |
+
A: Well, they've got the money and she's going tomorrow for treatment.
|
| 554 |
+
|
| 555 |
+
(PersonX raise money to have PersonY treated in United States, xAttr, PersonX go tomorrow for treatment), (PersonX raises money to have PersonY treated in United States, xReact, PersonX feels good), (PersonX raise money to have PersonY treated in United States, xEffect, PersonX go tomorrow for treatment), (PersonX raise money to have PersonY treated in United States, xWant, personX go tomorrow for treatment), (PersonX raise money to have PersonY treated in United States, xIntent, PersonX go tomorrow for treatment), (PersonX raise money to have PersonY treated in United States, oEffect, PersonY go tomorrow for treatment), (PersonX raises money to have PersonY treated in United States, oReact, PersonY feels happy), (PersonX raise money to have PersonY treated in United States, oWant, PersonY go tomorrow for treatment), (PersonX go to the United States, isAfter, The parents raise money to have the girl treated)
|
| 556 |
+
|
| 557 |
+
Dialogue History:
|
| 558 |
+
|
| 559 |
+
A: Yeah I know, he did a great job for the limited time, can you believe that 20th Century Fox let George Lucas keep the merchandising rights, that's what made him a billionaire, so crazy I don't think they would do that ever again.
|
| 560 |
+
|
| 561 |
+
B: I think I can believe it. Good for him, what does the corporation need with something they didn't invent anyways?
|
| 562 |
+
|
| 563 |
+
A: No I agree with you. Are you going to watch the Academy Awards by the way? They give out 24 Awards and I think that the first broadcast was back in 1930, which is wild
|
| 564 |
+
|
| 565 |
+
B: Yeah, so like the academy awards totally suck. Does anyone actually give a bleep what they think?
|
| 566 |
+
|
| 567 |
+
Target Response:
|
| 568 |
+
|
| 569 |
+
A: Well I can't speak for the other commenters but I'm sure there are people who give them a bleep can help winners go on to win awards and make money.
|
| 570 |
+
|
| 571 |
+
(PersonX give a bleep, HasSubEvent, PersonX help winners go on to win awards)
|
| 572 |
+
|
| 573 |
+
Table 13: Some examples of the automatically extracted event-relation tuples. These tuples function as the intermediate symbolic representation in ACCENT framework.
|
| 574 |
+
|
| 575 |
+
# Instructions (click to expand/collapse)
|
| 576 |
+
|
| 577 |
+
Thanks for participating in this HIT!
|
| 578 |
+
|
| 579 |
+
In this task, you need to extract events from the Response and its previous utterance according to the given relation. The expected outcome consists of 3 parts: Event A, Relation, Event B
|
| 580 |
+
|
| 581 |
+
Event A, Event B
|
| 582 |
+
|
| 583 |
+
Short phrases with a verb that may describe object properties, actions, etc.
|
| 584 |
+
|
| 585 |
+
For example, in "I have just participated in the 3000-meter race, so I feel very tired
|
| 586 |
+
|
| 587 |
+
now," we can find two events, I participate in the 3000-meter race and I feel tired.
|
| 588 |
+
|
| 589 |
+
Relation
|
| 590 |
+
|
| 591 |
+
How Event A relates to Event B?
|
| 592 |
+
|
| 593 |
+
In the former example, there exists a relation of "X reacts" - "I feel tired" is the person's
|
| 594 |
+
|
| 595 |
+
reaction to "I participate in the 3000-meter race".
|
| 596 |
+
|
| 597 |
+
See detailed definitions and more examples below.
|
| 598 |
+
|
| 599 |
+
When composing Event A / Event B, please replace the person name or its pronoun with "PersonX". If there are additional people involved, replace them with "PersonY". For example,
|
| 600 |
+
|
| 601 |
+
# Source text
|
| 602 |
+
|
| 603 |
+
I have just participated in the 3000-meter race, so I
|
| 604 |
+
|
| 605 |
+
feel very tired now.
|
| 606 |
+
|
| 607 |
+
I'll invite my friends home.
|
| 608 |
+
|
| 609 |
+
# Event
|
| 610 |
+
|
| 611 |
+
PersonX participate in the 3000-meter race;
|
| 612 |
+
|
| 613 |
+
PersonX feel tired
|
| 614 |
+
|
| 615 |
+
PersonX invite PersonX's friends home
|
| 616 |
+
|
| 617 |
+
Sometimes, Event A / Event B may come from the sentence which doesn't have a subject. You need to add the subject yourself.
|
| 618 |
+
|
| 619 |
+
Here are the Relations you need to consider (more examples can be found below):
|
| 620 |
+
|
| 621 |
+
X effect
|
| 622 |
+
|
| 623 |
+
as a result, X will
|
| 624 |
+
|
| 625 |
+
X react
|
| 626 |
+
|
| 627 |
+
as a result, X feels
|
| 628 |
+
|
| 629 |
+
X intend
|
| 630 |
+
|
| 631 |
+
because X intends
|
| 632 |
+
|
| 633 |
+
Attention! A couple NOTES:
|
| 634 |
+
|
| 635 |
+
- The two events should come from the Response or its previous utterance. If both events are from the Response, please select "Single"; if one event is from the Response and another one is from its previous utterance, please select "Pair".
|
| 636 |
+
- Don't worry about whether the Response makes sense or not. Simply find events and their relations according to the text.
|
| 637 |
+
- In a given sample, you may be able to find multiple (Event 1, Relation, Event 2) triplets. Please write down all of them.
|
| 638 |
+
- If you cannot find any tuple after considering all the given relations, feel free to report it (see below).
|
| 639 |
+
|
| 640 |
+
# Relations (click to expand/collapse)
|
| 641 |
+
|
| 642 |
+
# Sample 1
|
| 643 |
+
|
| 644 |
+
A: ${dlg_history_1}
|
| 645 |
+
|
| 646 |
+
B: ${dlg_response_1}
|
| 647 |
+
|
| 648 |
+
Click "Add new triplet" button to start working. Please write down all triplets you can find. If you really cannot find any triplet, please tick the check box below.
|
| 649 |
+
|
| 650 |
+
□ I have carefully considered all relations but I cannot find any triplet.
|
| 651 |
+
|
| 652 |
+
# Relation
|
| 653 |
+
|
| 654 |
+
# Event 1
|
| 655 |
+
|
| 656 |
+
# Event 2
|
| 657 |
+
|
| 658 |
+
# Evidence comes from
|
| 659 |
+
|
| 660 |
+
X effect X react X intend
|
| 661 |
+
|
| 662 |
+
Event 1
|
| 663 |
+
|
| 664 |
+
Event 2
|
| 665 |
+
|
| 666 |
+
Single
|
| 667 |
+
|
| 668 |
+
Pair
|
| 669 |
+
|
| 670 |
+

|
| 671 |
+
Figure 8: Mechanical Turk template used to collect event-relation tuples. This template considers "xEffect", "xReact", "xIntent" relations.
|
| 672 |
+
|
| 673 |
+
# Add new triplet
|
| 674 |
+
|
| 675 |
+
# A For every submission:
|
| 676 |
+
|
| 677 |
+
A1. Did you describe the limitations of your work?
|
| 678 |
+
|
| 679 |
+
Sections 6.2, 6.4 and Appendix H include the error analysis of the ACCENT framework. A separate section of "Limitations" is also included in Appendix A.
|
| 680 |
+
|
| 681 |
+
A2. Did you discuss any potential risks of your work?
|
| 682 |
+
|
| 683 |
+
We do not include potential risks of our work, since we believe that none of the components in our model and the dataset by itself can produce offensive results. The responses generated and augmented to the DECO dataset come from previously proposed state-of-the-art models which are trained on datasets without profanity and inappropriate utterances. Other than that, our work is pertinent to evaluation and is therefore less likely to pose potential risks.
|
| 684 |
+
|
| 685 |
+
A3. Do the abstract and introduction summarize the paper's main claims?
|
| 686 |
+
|
| 687 |
+
Abstract and Introduction summarize the paper's main claims. Three experimental setups and results show the ACCENT's superiority versus baselines.
|
| 688 |
+
|
| 689 |
+
A4. Have you used AI writing assistants when working on this paper?
|
| 690 |
+
|
| 691 |
+
Left blank.
|
| 692 |
+
|
| 693 |
+
# B Did you use or create scientific artifacts?
|
| 694 |
+
|
| 695 |
+
Left blank.
|
| 696 |
+
|
| 697 |
+
B1. Did you cite the creators of artifacts you used?
|
| 698 |
+
|
| 699 |
+
In sections 5.1 and 6.3, we cite the CSKB population benchmark. Section 4.1 has the citations for each of the dialogue models used for the DECO response generation.
|
| 700 |
+
|
| 701 |
+
B2. Did you discuss the license or terms for use and / or distribution of any artifacts?
|
| 702 |
+
|
| 703 |
+
Some dialogue sources we used come with licence. We discuss them in Appendix D.3.
|
| 704 |
+
|
| 705 |
+
B3. Did you discuss if your use of existing artifact(s) was consistent with their intended use, provided that it was specified? For the artifacts you create, do you specify intended use and whether that is compatible with the original access conditions (in particular, derivatives of data accessed for research purposes should not be used outside of research contexts)?
|
| 706 |
+
|
| 707 |
+
Sections 5.1 and 6.3 use existing CSKB population benchmark that are compatible with the conditions that data was granted to be used. Our collected data is anonymized and section 4 describes the data use cases and statements.
|
| 708 |
+
|
| 709 |
+
B4. Did you discuss the steps taken to check whether the data that was collected / used contains any information that names or uniquely identifies individual people or offensive content, and the steps taken to protect / anonymize it?
|
| 710 |
+
|
| 711 |
+
Section 4 is mainly about data collection and creation, it discusses the data included in the collection process which are human judgments and it doesn't reveal the identity of users participated in the data collection process. The inputs for evaluation are coming from existing human-written conversational data and responses generated by dialogue models do not include inappropriate content.
|
| 712 |
+
|
| 713 |
+
B5. Did you provide documentation of the artifacts, e.g., coverage of domains, languages, and linguistic phenomena, demographic groups represented, etc.?
|
| 714 |
+
|
| 715 |
+
Appendix D includes the language of collected dataset alongside the information about the data and its details.
|
| 716 |
+
|
| 717 |
+
B6. Did you report relevant statistics like the number of examples, details of train / test / dev splits, etc. for the data that you used / created? Even for commonly-used benchmark datasets, include the number of examples in train / validation / test splits, as these provide necessary context for a reader to understand experimental results. For example, small differences in accuracy on large test sets may be significant, while on small test sets they may not be.
|
| 718 |
+
|
| 719 |
+
Section 4 and Section D.2 in the Appendix are about the size of datasets.
|
| 720 |
+
|
| 721 |
+
# C Did you run computational experiments?
|
| 722 |
+
|
| 723 |
+
Left blank.
|
| 724 |
+
|
| 725 |
+
C1. Did you report the number of parameters in the models used, the total computational budget (e.g., GPU hours), and computing infrastructure used?
|
| 726 |
+
|
| 727 |
+
Appendix E.3 reports the computational resources.
|
| 728 |
+
|
| 729 |
+
C2. Did you discuss the experimental setup, including hyperparameter search and best-found hyperparameter values?
|
| 730 |
+
|
| 731 |
+
Yes we discussed them in Appendix E.
|
| 732 |
+
|
| 733 |
+
C3. Did you report descriptive statistics about your results (e.g., error bars around results, summary statistics from sets of experiments), and is it transparent whether you are reporting the max, mean, etc. or just a single run?
|
| 734 |
+
|
| 735 |
+
Sections 5 and 6 include the descriptive statistics about our results. Table 1 also demonstrates the results for ACCENT that all are significant $(p < 0.05)$ .
|
| 736 |
+
|
| 737 |
+
C4. If you used existing packages (e.g., for preprocessing, for normalization, or for evaluation), did you report the implementation, model, and parameter settings used (e.g., NLTK, Spacy, ROUGE, etc.)?
|
| 738 |
+
|
| 739 |
+
Left blank.
|
| 740 |
+
|
| 741 |
+
# D Did you use human annotators (e.g., crowdworkers) or research with human participants?
|
| 742 |
+
|
| 743 |
+
Left blank.
|
| 744 |
+
|
| 745 |
+
D1. Did you report the full text of instructions given to participants, including e.g., screenshots, disclaimers of any risks to participants or annotators, etc.?
|
| 746 |
+
|
| 747 |
+
Section 4 and Appendix D contain all the details about human annotations and conducted experiments.
|
| 748 |
+
|
| 749 |
+
D2. Did you report information about how you recruited (e.g., crowdsourcing platform, students) and paid participants, and discuss if such payment is adequate given the participants' demographic (e.g., country of residence)?
|
| 750 |
+
|
| 751 |
+
The crowdsourcing platform is discussed in Section 4, and we discuss how we pay the annotators in Appendix D.1.
|
| 752 |
+
|
| 753 |
+
D3. Did you discuss whether and how consent was obtained from people whose data you're using/curating? For example, if you collected data via crowdsourcing, did your instructions to crowdworkers explain how the data would be used?
|
| 754 |
+
|
| 755 |
+
Yes, it was indicated in the AMT HIT pages.
|
| 756 |
+
|
| 757 |
+
D4. Was the data collection protocol approved (or determined exempt) by an ethics review board?
|
| 758 |
+
|
| 759 |
+
Not applicable. We did not include an ethics review board for our data collection process.
|
| 760 |
+
|
| 761 |
+
D5. Did you report the basic demographic and geographic characteristics of the annotator population that is the source of the data?
|
| 762 |
+
|
| 763 |
+
Appendix D.1 reports the basic demographic and geographic characteristics of the annotator population.
|
accentanautomaticeventcommonsenseevaluationmetricforopendomaindialoguesystems/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2aa7e994b5fbca987264587f4d6d87bf4da30b1a8567719f1179bab26be772c2
|
| 3 |
+
size 814426
|
accentanautomaticeventcommonsenseevaluationmetricforopendomaindialoguesystems/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d0432f837d24e9e3f674e58dffb42add39b7a73c099e30663b967f2bcf8d0d20
|
| 3 |
+
size 773680
|
aclmaselectivedenoisingbasedgenerativedataaugmentationapproachforlowresourcecomplexner/0b69e674-6367-45fa-a4f3-60843206c20e_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:77969e57ec1e3a8137f81439f9e363ff75c8eaee4d16744b3f88bccc24955824
|
| 3 |
+
size 159838
|
aclmaselectivedenoisingbasedgenerativedataaugmentationapproachforlowresourcecomplexner/0b69e674-6367-45fa-a4f3-60843206c20e_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:339a7ab006397fa72b0d38cad113fa757445f2d115368c4c058ec49962562a30
|
| 3 |
+
size 181963
|
aclmaselectivedenoisingbasedgenerativedataaugmentationapproachforlowresourcecomplexner/0b69e674-6367-45fa-a4f3-60843206c20e_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f7844bb690f6fe9e3e17a7af75b16173d2c0b077f0eb7d630453d24c7b66decd
|
| 3 |
+
size 3927672
|
aclmaselectivedenoisingbasedgenerativedataaugmentationapproachforlowresourcecomplexner/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
aclmaselectivedenoisingbasedgenerativedataaugmentationapproachforlowresourcecomplexner/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:13b2e090a9cd3973eff19724baa77d00cb32950a5080347e24ebed9b743e48b0
|
| 3 |
+
size 2376614
|
aclmaselectivedenoisingbasedgenerativedataaugmentationapproachforlowresourcecomplexner/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:63128551c75414f81e230d3ef9feefaa58150ab9c65754803e4b87368df95eeb
|
| 3 |
+
size 590108
|
activelysupervisedclusteringforopenrelationextraction/b96a6d17-18bc-44ec-b68e-7c0258200d66_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:246a2a6059978f077efb34698dea9f8672010536dc0a28ab0f49d03562781606
|
| 3 |
+
size 94611
|
activelysupervisedclusteringforopenrelationextraction/b96a6d17-18bc-44ec-b68e-7c0258200d66_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:489cca5eb20731002eec5debb7ccbe56f9b489c4b8fa1aa8a3a315ae61f01c14
|
| 3 |
+
size 110742
|
activelysupervisedclusteringforopenrelationextraction/b96a6d17-18bc-44ec-b68e-7c0258200d66_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:22b17dd24c7433f3d5c1e0de5aedb1b1f423e8048d520e63807672a9529a5a86
|
| 3 |
+
size 906804
|
activelysupervisedclusteringforopenrelationextraction/full.md
ADDED
|
@@ -0,0 +1,387 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Actively Supervised Clustering for Open Relation Extraction
|
| 2 |
+
|
| 3 |
+
Jun Zhao $^{1*}$ , Yongxin Zhang $^{1*}$ , Qi Zhang $^{1†}$ , Tao Gui $^{2†}$ , Zhongyu Wei $^{3}$ , Minlong Peng $^{4}$ , Mingming Sun $^{4}$
|
| 4 |
+
|
| 5 |
+
$^{1}$ School of Computer Science, Fudan University
|
| 6 |
+
|
| 7 |
+
$^{2}$ Institute of Modern Languages and Linguistics, Fudan University
|
| 8 |
+
|
| 9 |
+
$^{3}$ School of Data Science, Fudan University
|
| 10 |
+
|
| 11 |
+
Cognitive Computing Lab Baidu Research
|
| 12 |
+
|
| 13 |
+
{zhaoj19,yongxinzhang20,qz,tgui,zywei}@fudan.edu.cn
|
| 14 |
+
|
| 15 |
+
# Abstract
|
| 16 |
+
|
| 17 |
+
Current clustering-based Open Relation Extraction (OpenRE) methods usually adopt a two-stage pipeline. The first stage simultaneously learns relation representations and assignments. The second stage manually labels several instances and thus names the relation for each cluster. However, unsupervised objectives struggle to optimize the model to derive accurate clustering assignments, and the number of clusters has to be supplied in advance. In this paper, we present a novel setting, named actively supervised clustering for OpenRE. Our insight lies in that clustering learning and relation labeling can be alternately performed, providing the necessary guidance for clustering without a significant increase in human effort. The key to the setting is selecting which instances to label. Instead of using classical active labeling strategies designed for fixed known classes, we propose a new strategy, which is applicable to dynamically discover clusters of unknown relations. Experimental results show that our method is able to discover almost all relational clusters in the data and improve the SOTA methods by $10.3\%$ and $5.2\%$ , on two datasets respectively.
|
| 18 |
+
|
| 19 |
+
# 1 Introduction
|
| 20 |
+
|
| 21 |
+
Relation extraction (RE) aims to detect and extract the potential relation between the given entity pair in unstructured text. The extracted relation facts play a vital role in many downstream applications, such as knowledge base population (Ji and Grishman, 2011), search engine (Schlichtkrull et al., 2018), and question answering (Yu et al., 2017). To deal with the emerging unknown relational types in the real world, Open Relation Extraction (OpenRE) has been widely studied.
|
| 22 |
+
|
| 23 |
+
The clustering-based unsupervised relation discovery is a classical paradigm for OpenRE (Yao
|
| 24 |
+
|
| 25 |
+

|
| 26 |
+
Unsupervised two-stage OpenRE
|
| 27 |
+
|
| 28 |
+

|
| 29 |
+
Actively Supervised OpenRE
|
| 30 |
+
Figure 1: Compared with the existing unsupervised two-stage methods, our method can provide explicit supervision for clustering by alternately performing clustering learning and relation labeling. Note that the human effort of the two settings is comparable.
|
| 31 |
+
|
| 32 |
+
et al., 2011; Marcheggiani and Titov, 2016; Elsahar et al., 2017). It can discover potential relations, by grouping several instances into relational clusters, and then manually labeling a few instances to name the relation of each cluster. Recently, Hu et al. (2020) introduced a deep clustering framework (Caron et al., 2018) into OpenRE. They iteratively cluster the relation representations that are produced by large pretrained models and use the cluster assignments as pseudo-labels to refine the representations. Unfortunately, the above unsupervised methods struggle to learn good enough representations, and the cluster assignments are error-prone. When multiple relations are mixed in a cluster, it becomes difficult to name the cluster. Hence, instead of regarding OpenRE as a totally unsupervised task, researchers leverage the labeled data of predefined relations to provide explicit supervision signals for clustering learning (Wu et al., 2019; Zhao et al., 2021), and achieve superior results.
|
| 33 |
+
|
| 34 |
+
Different from the above two-stage methods, in this work, we present a new setting named actively supervised clustering for OpenRE (ASCORE). As
|
| 35 |
+
|
| 36 |
+
shown in fig. 1, our insight lies in that clustering learning (i.e., deep clustering) and relation labeling can be alternately performed. In an iteration, a small number of key instances are selected for labeling. The unknown relations expressed by these instances are correspondingly discovered. More importantly, these labeled instances can provide explicit supervisory signals for clustering learning. The improved relation representations form a better cluster structure, which in turn is able to benefit the discovery of the neglected relations. Since potential relations are dynamically discovered in iterations, the number of clusters does not need to be provided in advance.
|
| 37 |
+
|
| 38 |
+
Along with this setting, we design an active labeling strategy tailored for clustering. First, all instances are encoded to points in the representation space, where the clustering is performed. The goal of the strategy is to select the most informative points for labeling. Intuitively, two points that are far from each other in representation space usually express different relations. To discover as many relations as possible, we introduce a distance regularization to the strategy, so that diversified relation discovery can be facilitated. To prevent over-fitting caused by training with limited active labeled instances, all the selected key points are required to be the points of maximum local density. By doing so, a large number of high-quality pseudo-labels can be obtained, by assigning active labels to unlabeled data in a small neighborhood. To mitigate the error propagation issue, different loss functions are assigned to active labels and pseudo-labels with different reliability for clustering learning. Experimental results show that (1) the actively supervised method improves the SOTA two-stage methods by a large margin without a significant increase in human effort. (2) the proposed active strategy can discover more relational clusters, compared with the classical active strategy.
|
| 39 |
+
|
| 40 |
+
To summarize, the main contributions of this work are as follows: (1) We present a new setting named actively supervised clustering for OpenRE, providing the necessary guidance for clustering without a significant increase in human effort. (2) Design of a new active labeling strategy tailored for clustering, that can effectively discover potential relational clusters in unlabeled data. (3) This method improves the SOTA two-stage methods by $10.3\%$ and $5.2\%$ on two well-known datasets,
|
| 41 |
+
|
| 42 |
+
respectively.
|
| 43 |
+
|
| 44 |
+
# 2 Related Work
|
| 45 |
+
|
| 46 |
+
Clustering-based OpenRE: The clustering-based paradigm considers relation discovery as a two-stage pipeline, which clusters relational data first, and then manually labels relational semantics for each cluster. Conventional methods cluster instances by human-defined linguistic features (Yao et al., 2011; Marcheggiani and Titov, 2016; Elsahar et al., 2017), such as entity words/type, dependency paths, trigger words, context POS tags. Recently, many studies have shown that pretrained models learn diversified linguistic knowledge (Jawahar et al., 2019; Clark et al., 2019; Goldberg, 2019; Zhao et al., 2022). Hu et al. (2020) leverage the self-supervised signals provided by the pretrained model to iteratively learn relation representations and optimize clustering. Due to the lack of strong supervision, it is difficult for the above methods to produce satisfactory clustering results. Although some works (Wu et al., 2019; Zhao et al., 2021) try to use the labeled data of predefined relations to complete the missing supervision, the semantic gap between predefined and open relations leads to negative clustering bias, especially when these relations come from different domains (Zhao et al., 2021). By performing clustering learning and relation labeling alternately, our actively supervised method can provide strong supervision and improve the two-stage methods by a large margin. In the main results (sec. 5), we achieve this improvement at the cost of only two active labels for each relation on average. For two-stage methods, relation labeling for each cluster requires at least one (usually more) instance to be manually observed. Therefore, there is no significant increase in human effort.
|
| 47 |
+
|
| 48 |
+
Active Learning: Active learning is a research field with high relevance to the proposed methods. In the research field, a classical method is uncertainty-based sampling(Roth and Small, 2006; Wang and Shang, 2014a; Tong and Koller, 2001). The uncertainty can be defined based on the posterior probability of a predicted class or the distances to the decision boundaries. In the context of deep learning, MC Dropout (Gal et al., 2017) is an effective way for uncertainty estimation, but the computationally inefficient limits its application in large-scale datasets. Recently, representative sampling is attracting lots of attention (Sener and
|
| 49 |
+
|
| 50 |
+

|
| 51 |
+
Figure 2: Overview of the training pipeline for our actively supervised clustering setting. In each iteration, a few key points are selected for relation labeling. The rest instances are clustered to the nearest key points. Some highly reliable cluster assignments are used as pseudo-labels for relation representation learning.
|
| 52 |
+
|
| 53 |
+
Savarese, 2018; Ash et al., 2019a), which selects data points that represent the distribution of an unlabeled pool. Unlike the classical labeling strategies designed for fixed classes, the proposed strategy is encouraged to discover new relational clusters while improving the clustering of relations that have been discovered.
|
| 54 |
+
|
| 55 |
+
# 3 Approach
|
| 56 |
+
|
| 57 |
+
In this work, we present a new setting named actively supervised clustering for OpenRE (AS-CORE), which fuses the isolated two-stage pipeline to guide clustering learning. Fig. 2 illustrates the training pipeline. The OpenRE problem addressed in this work is formally stated as follows. Given as input an open relational dataset $\mathcal{D} = \{x_i|i = 1,\dots ,N\}$ , the goal is to discover and label potential relations $\mathcal{R} = \{r_i|i = 1,\dots ,K\}$ in the open data and cluster the corresponding instances. Note that the number of relations $K$ in $\mathcal{D}$ is unknown.
|
| 58 |
+
|
| 59 |
+
# 3.1 Overview
|
| 60 |
+
|
| 61 |
+
The ASCORE is based on deep clustering (Caron et al., 2018), which is a common practice for OpenRE (Hu et al., 2020; Zhao et al., 2021), that iteratively clusters the representations of input instances, and uses the cluster assignments as pseudo-labels to learn the relation representations. We introduce explicit supervision to deep clustering by alternately performing clustering learning and relation labeling. The actively labeled points can serve as a basis to facilitate accurate pseudo-label estimation and improve representation learning. The improved relational representation in turn
|
| 62 |
+
|
| 63 |
+
benefits relation labeling to discover more relations. As illustrated in Figure 2, the training pipeline of ASCORE consists of the following steps:
|
| 64 |
+
|
| 65 |
+
Encoding Step: This step aims to obtain the relation representation $h_i$ of each input instance $x_i \in \mathcal{D}$ , laying the groundwork for clustering and relation discovery. First, the contextual information of $x_i$ is encoded to the entity pair representation $h_i^{ent}$ , using a pretrained BERT (Devlin et al., 2018) encoder. To avoid data sparsity and low efficiency of clustering in high-dimensional space, an autoencoder is used to transform $h_i^{ent}$ into a low-dimensional clustering-friendly representation $h_i$ .
|
| 66 |
+
|
| 67 |
+
Labeling Step: This step aims to discover potential relations in the open dataset $\mathcal{D}$ and guide clustering. At the $s^{\mathrm{th}}$ iteration, a set $\mathcal{D}_s^* \subset \mathcal{D}$ is actively labeled, including $B$ key points. These key points are required to be local maxima in density, so a large number of high-quality pseudo-labels can be obtained by assigning active labels to unlabeled data in a small neighborhood. Instead of focusing only on improving the clustering of the discovered relations, all key points in $\mathcal{D}^* = \mathcal{D}_1^* \cup \dots \cup \mathcal{D}_s^*$ are required to be far away from each other to facilitate the discovery of new relations.
|
| 68 |
+
|
| 69 |
+
Learning Step: This step aims to learn clustering relational data, using an actively labeled $\mathcal{D}^*$ . Specifically, each unlabeled point $x_{i}\in \mathcal{D}$ is clustered to the nearest key point $x_{j}^{*}\in \mathcal{D}^{*}$ and the pseudo label is $\hat{y}_i = y_j^*$ . The reliability of $\hat{y}_i$ increases as the distance in representation space between $x_{i}$ and $x_{j}^{*}$ decreases. Cross-entropy loss (resp. divergence-based contrastive loss) is
|
| 70 |
+
|
| 71 |
+
used for pseudo labels with high (resp. moderate) reliability, to optimize relation representations and thus to improve clustering. With the help of active supervision, separated subclusters expressing the same relation approach each other, while mixed subclusters expressing different relations are separated. Existing unsupervised methods are inherently difficult to handle such errors.
|
| 72 |
+
|
| 73 |
+
The above three steps are performed iteratively to gradually improve the model performance. In the following sections, we will elaborate on the model structure, labeling strategy, and training methods involved in the above three steps.
|
| 74 |
+
|
| 75 |
+
# 3.2 Relation Representation Encoder
|
| 76 |
+
|
| 77 |
+
Given a relation instance $x_{i} = \{w_{1}, w_{2}, \dots, w_{n}\}$ , in which four reserved word pieces $[E1], [\backslash E1], [E2], [\backslash E2]$ are used to mark the beginning and end of each entity, the relation representation encoder $f$ aims to encode the contextual relational information of instance $x_{i}$ into a fixed-length representation $h_{i}^{ent} = f(x_{i}) \in \mathbb{R}^{d}$ . The encoder $f$ is implemented as BERT. Specifically:
|
| 78 |
+
|
| 79 |
+
$$
|
| 80 |
+
\boldsymbol {h} _ {1}, \dots , \boldsymbol {h} _ {n} = \operatorname {B E R T} \left(w _ {1}, \dots , w _ {n}\right) \tag {1}
|
| 81 |
+
$$
|
| 82 |
+
|
| 83 |
+
$$
|
| 84 |
+
\boldsymbol {h} ^ {\text {e n t}} = \left\langle \boldsymbol {h} _ {[ E 1 ]} \mid \boldsymbol {h} _ {[ E 2 ]} \right\rangle . \tag {2}
|
| 85 |
+
$$
|
| 86 |
+
|
| 87 |
+
Following Soares et al. (2019), the fixed-length representations are obtained, by concatenating the hidden states of marker $[E1],[E2]$ (i.e., $h_{[E1]}$ , $h_{[E2]}$ ) and $\langle \cdot |\cdot \rangle$ is the concatenation operator.
|
| 88 |
+
|
| 89 |
+
However, clustering data in high-dimensional space is time-consuming and the data sparsity leads to sub-optimal clustering results. Therefore, an autoencoder is trained by reconstruction loss $\mathcal{L}_{rec}$ and the encoder part is retained to transform high-dimensional $h^{ent}$ to a low-dimensional clustering-friendly relation representation $h$ .
|
| 90 |
+
|
| 91 |
+
# 3.3 Key Point Selection Module
|
| 92 |
+
|
| 93 |
+
In this section, the proposed key point selection method will be explained, including the labeling strategy and the conditions for stopping annotation. Labeling strategy. The labeling strategy is based on the following criteria. First, the selected key points are local maxima in density. Generally, labels do not drastically change within a small neighborhood; therefore, the first criterion makes it possible to find a large amount of unlabeled data within a small neighborhood of each key point and to accurately estimate their pseudo-labels. To find these local maxima, the Euclidean distance is calculated
|
| 94 |
+
|
| 95 |
+
between the relation representations $\{\pmb{h}_i\}_{i = 1,2,\dots,N}$ obtained in encoding step, and the distance matrix $D\in \mathbb{R}^{N\times N}$ is constructed as follows:
|
| 96 |
+
|
| 97 |
+
$$
|
| 98 |
+
\boldsymbol {D} _ {i j} = \left\| \boldsymbol {h} _ {i} - \boldsymbol {h} _ {j} \right\| _ {2} ^ {2}, \tag {3}
|
| 99 |
+
$$
|
| 100 |
+
|
| 101 |
+
where $D_{ij}$ is the distance between two relational instance $x_{i}$ and $x_{j}$ . The potential computational cost to process large-scale datasets can be solved by sampling a small subset. Based on distance matrix $\pmb{D}$ , a density $\rho_{i}$ is further defined for each relation instance $x_{i}$ . A larger $\rho_{i}$ indicates a larger number of instances around $x_{i}$ :
|
| 102 |
+
|
| 103 |
+
$$
|
| 104 |
+
\rho_ {i} = \sum_ {j = 1} ^ {N} \operatorname {s i g n} \left(D _ {c} - D _ {i j}\right), \tag {4}
|
| 105 |
+
$$
|
| 106 |
+
|
| 107 |
+
where $\text{sign}()$ is the sign function and $D_{c}$ is a threshold.
|
| 108 |
+
|
| 109 |
+
To avoid the problem that all the labeled points are concentrated in several high-density areas and most long-tail relations are missed, the second criterion is to keep these key points away from each other in the clustering space. Specifically, a sparsity index $\xi_{i}$ is defined for each instance $x_{i} \in \mathcal{D}$ .
|
| 110 |
+
|
| 111 |
+
$$
|
| 112 |
+
\xi_ {i} = \left\{ \begin{array}{l l} \min _ {j, \rho_ {j} > \rho_ {i}} D _ {i j}, & \rho_ {i} < \rho_ {\max } \\ \max _ {j} D _ {i j} & \rho_ {i} = \rho_ {\max } \end{array} \right. \tag {5}
|
| 113 |
+
$$
|
| 114 |
+
|
| 115 |
+
Intuitively, a larger $\xi_{i}$ indicates that the instance $x_{i}$ is a local maximum of density in a larger radius. Based on the density $\rho_{i}$ and sparsity index $\xi_{i}$ of each instance, the labeling strategy can be formally stated as follows. In each iteration, choose $B$ points with the highest density and their distance from each other is greater than $\xi_{c}$ .
|
| 116 |
+
|
| 117 |
+
$$
|
| 118 |
+
\mathcal {D} _ {s} ^ {*} = \operatorname {T o p B} _ {\rho} \left\{x _ {i} \mid \xi_ {i} > \xi_ {c}, x _ {i} \in \mathcal {D} \right\} \tag {6}
|
| 119 |
+
$$
|
| 120 |
+
|
| 121 |
+
To effectively support iterative labeling and maintain the diversity of key points, in the $s^{\mathrm{th}}$ iteration, each new key point $x_{i}$ should be as far away as possible from the existing key points in $\mathcal{D}^{*} = \mathcal{D}_{1}^{*}\cup \ldots \cup \mathcal{D}_{s - 1}^{*}$. Therefore, for each instance $x_{i}$, the sparsity index is modified as follows:
|
| 122 |
+
|
| 123 |
+
$$
|
| 124 |
+
d = \min _ {x _ {j} \in \mathcal {D} ^ {*}} \| \boldsymbol {h} _ {i} - \boldsymbol {h} _ {j} \| _ {2} ^ {2} \tag {7}
|
| 125 |
+
$$
|
| 126 |
+
|
| 127 |
+
$$
|
| 128 |
+
\left. \xi_ {i} = \min \left(\xi_ {i}, d\right), \right. \tag {8}
|
| 129 |
+
$$
|
| 130 |
+
|
| 131 |
+
After the $s^{\mathrm{th}}$ iteration, the result is the new actively labeled set $\mathcal{D}^{*} = \mathcal{D}^{*}\cup \mathcal{D}_{s}^{*}$.
|
| 132 |
+
|
| 133 |
+
Conditions for stopping annotation. Too few queries will lead to missing some relations, while
|
| 134 |
+
|
| 135 |
+
too many queries will lead to unnecessary costs. Here we give a simple strategy to determine when to stop labeling. (1) First, users can determine the maximum number of actively labeled instances, $N^{*}$ , based on their annotation budget. (2) Since our labeling strategy takes into account the diversity of key points, new relations are constantly discovered during the initial phase of iteration. When no new relations are discovered in two or more consecutive iteration steps (it means that most relations have been found), the labeling should be stopped.
|
| 136 |
+
|
| 137 |
+
# 3.4 Training Methods
|
| 138 |
+
|
| 139 |
+
Pseudo Label Estimation. Given the actively labeled set $\mathcal{D}^*$ , each of the remaining unlabeled points $x_{i}\in \mathcal{D}$ is clustered to the nearest key point $x_{j}^{*}\in \mathcal{D}^{*}$ and $\hat{y}_i$ is estimated as $y_j^*$ . Intuitively, the accuracy of the pseudo label decreases with the increase of the distance between $x_{i}$ and $x_{j}^{*}$ . The reliability $r$ of pseudo labels is defined as follows:
|
| 140 |
+
|
| 141 |
+
$$
|
| 142 |
+
r _ {i} = \left\| \boldsymbol {h} _ {i} - \boldsymbol {h} _ {j} ^ {*} \right\| _ {2} ^ {- 1}, \tag {9}
|
| 143 |
+
$$
|
| 144 |
+
|
| 145 |
+
where $\pmb{h}_i$ and $\pmb{h}_j^*$ denote the representation of $x_i$ and $x_j^*$ , respectively. $\| \cdot \|_2^{-1}$ denotes reciprocal of $L_2$ norm.
|
| 146 |
+
|
| 147 |
+
Model Optimization. Given the pseudo label $\hat{y}_i$ and its reliability $r_i$ for each unlabeled data $x_i \in \mathcal{D}$ , the relation representation is refined to improve clustering in the next iteration. Specifically, we first filter out a highly reliable subset $\mathcal{D}_h = \{(x_i, \hat{y}_i) | r_i > r_h\}$ and use a softmax classifier to convert entity pair representation $h_i^{ent}$ into the probability distribution on discovered relations (denoted as $\mathcal{P}_i$ ). The model is optimized with cross entropy loss for fast convergence:
|
| 148 |
+
|
| 149 |
+
$$
|
| 150 |
+
\mathcal {L} _ {c e} = \text {C r o s s E n t r o p y} (\hat {y} _ {i}, \mathcal {P} _ {i}). \tag {10}
|
| 151 |
+
$$
|
| 152 |
+
|
| 153 |
+
Note that the number of instances in $\mathcal{D}_h$ is small. To avoid the model learning only simple features, the threshold is broadened, and a moderately reliable subset $\mathcal{D}_m = \{(x_i,\hat{y}_i)|r_i > r_m\}$ containing more instances is built. To mitigate the negative impact of noise in $\mathcal{D}_m$ , a binary contrastive loss is further introduced:
|
| 154 |
+
|
| 155 |
+
$$
|
| 156 |
+
\mathcal {L} _ {b c e} = \mathcal {L} _ {\hat {y} _ {i} = \hat {y} _ {j}} + \mathcal {L} _ {\hat {y} _ {i} \neq \hat {y} _ {j}} \tag {11}
|
| 157 |
+
$$
|
| 158 |
+
|
| 159 |
+
$$
|
| 160 |
+
\mathcal {L} _ {\hat {y} _ {i} = \hat {y} _ {j}} = \mathcal {D} _ {k l} \left(\mathcal {P} _ {i} ^ {*} \mid \mid \mathcal {P} _ {j}\right) + \mathcal {D} _ {k l} \left(\mathcal {P} _ {i} \mid \mid \mathcal {P} _ {j} ^ {*}\right) \tag {12}
|
| 161 |
+
$$
|
| 162 |
+
|
| 163 |
+
$$
|
| 164 |
+
\mathcal {L} _ {\hat {y} _ {i} \neq \hat {y} _ {j}} = H _ {\sigma} \left(\mathcal {D} _ {k l} \left(\mathcal {P} _ {i} ^ {*} \mid \mid \mathcal {P} _ {j}\right)\right) + H _ {\sigma} \left(\mathcal {D} _ {k l} \left(\mathcal {P} _ {i} \mid \mid \mathcal {P} _ {j} ^ {*}\right)\right) \tag {13}
|
| 165 |
+
$$
|
| 166 |
+
|
| 167 |
+
$$
|
| 168 |
+
H _ {\sigma} (x) = \max (0, \sigma - x), \tag {14}
|
| 169 |
+
$$
|
| 170 |
+
|
| 171 |
+
# Algorithm 1: ASCORE
|
| 172 |
+
|
| 173 |
+
Input: A open dataset $\mathcal{D} = \{x_i\}_{i=1}^N$
|
| 174 |
+
1 repeat
|
| 175 |
+
2 Perform the encoding step (sec. 3.2). Get relation representation $\{h_i\}_{i=1}^N$ for instances in $\mathcal{D}$ ;
|
| 176 |
+
3 if The conditions for stopping annotation are not met then
|
| 177 |
+
4 Perform the labeling step (sec. 3.3). Get the $s^{\text{th}}$ actively labeled set $\mathcal{D}^* = \mathcal{D}^* \cup \mathcal{D}_s^*$ ;
|
| 178 |
+
5 end
|
| 179 |
+
6 Perform the learning step (sec. 3.4). Estimate pseudo labels and reliability scores. Use the corresponding loss for representation learning.
|
| 180 |
+
7 until convergence;
|
| 181 |
+
8 Return Discovered relation set $\mathcal{R}$ and the cluster assignment $\hat{y}_i \in \mathcal{R}$ of each instance $x_i \in \mathcal{D}$ .
|
| 182 |
+
|
| 183 |
+
where $\sigma$ is a hyperparameter and $\mathcal{P}^*$ denotes that $\mathcal{P}$ is assumed to be a constant for asymmetry. $\mathcal{D}_{kl}$ denotes KL divergence. The probability distribution $\mathcal{P}$ will be pulled closer or farther depending on whether the labels of the sample pairs are the same. In each iteration, if the annotator finds new relations, the parameters of the softmax classifier are reinitialized to deal with the new relations. Alg.1 shows an algorithm flow that is able to clearly summarize the proposed method.
|
| 184 |
+
|
| 185 |
+
# 4 Experimental Setup
|
| 186 |
+
|
| 187 |
+
# 4.1 Datasets
|
| 188 |
+
|
| 189 |
+
Experiments are conducted on two standard and one constructed dataset. Note that the compared baselines follow different settings. As will be described in sec. 4.2, RSN and RoCORE leverage labeled data of predefined relations, while RW-HAC and SelfORE follow unsupervised setting. To fairly compare all methods in a uniform setting, the first half of the relations in each dataset are held out as predefined relations. Specifically, in TACRED, 21 relations are held out, while in FewRel and FewRel-LT, the number is 40.
|
| 190 |
+
|
| 191 |
+
TACRED (Zhang et al., 2017). TACRED is a large-scale manually annotated RE dataset, covering 41 relations. Following Wu et al. (2019); Hu et al. (2020); Zhao et al. (2021), the instances labeled as *no_relation* are removed and the rest are used
|
| 192 |
+
|
| 193 |
+
for training and evaluation.
|
| 194 |
+
|
| 195 |
+
FewRel (Han et al., 2018). FewRel is a manually annotated dataset that contains 80 types of relations, each of which has 700 instances. However, in real-world OpenRE scenarios, unseen relations in unlabeled data usually follow a long-tailed distribution. To eliminate this inconsistency and accurately evaluate model performance in real-world scenarios, we construct a long-tail FewRel dataset as follows.
|
| 196 |
+
|
| 197 |
+
FewRel-LT. The FewRel-LongTail dataset. We number the 40 unseen relations in FewRel from 0 to 39, and calculate the number of samples according to $y = \frac{700}{0.5 * id + 1}$ . The number of samples in the predefined subset remains unchanged.
|
| 198 |
+
|
| 199 |
+
# 4.2 Compared Methods
|
| 200 |
+
|
| 201 |
+
To evaluate the effectiveness of the actively supervised clustering, the following SOTA two-stage OpenRE methods are used for comparison.
|
| 202 |
+
|
| 203 |
+
HAC with Re-weighted Word Embeddings (RW-HAC) (Elsahar et al., 2017). A clustering-based OpenRE method. The model constructs relational features based on entity types and the weighted sum of pretrained word embeddings.
|
| 204 |
+
|
| 205 |
+
Relational Siamese Network (RSN) (Wu et al., 2019). This method learns similarity metrics of relations from labeled data of pre-defined relations and then transfers the relational knowledge to identify novel relations in unlabeled data.
|
| 206 |
+
|
| 207 |
+
Self-supervised Feature Learning for OpenRE (SelfORE) (Hu et al., 2020). SelfORE exploits weak, self-supervised signals in pretrained language model for adaptive clustering on contextualized relational features.
|
| 208 |
+
|
| 209 |
+
A Relation-oriented Clustering Method (Ro-CORE) (Zhao et al., 2021). RoCORE leverages the labeled data of predefined relations to learn a clustering-friendly representation, which is used for new relations discovery.
|
| 210 |
+
|
| 211 |
+
To show the superiority of the proposed labeling strategy, the actively supervised clustering is combined with the following classical active learning strategies for comparison. Specifically, RANDOM, CONFIDENCE (Wang and Shang, 2014b), MARGIN (Roth and Small, 2006), ENTROPY (Wang and Shang, 2014a) and GRADIENT (Ash et al., 2019b) are included. We provide a brief introduction to these methods in appendix A.1.
|
| 212 |
+
|
| 213 |
+
# 4.3 Implementation Details
|
| 214 |
+
|
| 215 |
+
Following Hu et al. (2020) and Wu et al. (2019), $20\%$ of the data in each dataset are held out for validation and hyperparameter selection. We use the Adam (Kingma and Ba, 2014) as the optimizer, with a learning rate of $1e - 4$ and batch size of 100 for all datasets. The threshold $D_{c}$ is given by the value of an element ranked top $40\%$ in D from large to small. For each iteration, we label $B = 20$ samples. $\xi_c$ is set to the value when the number of candidates is $1.2B$ . Some important hyperparameters $r_h$ and $r_m$ are analyzed in sec. 6.3. For a fair comparison, all active strategies select the same number of key points for labeling. Specifically, 40, 80 and 80 key points are labeled on the three datasets TACRED, FewRel, and FewRel-LT respectively. All experiments are conducted with Pytorch 1.7.0, using an NVIDIA GeForce RTX 3090 with 24GB memory.
|
| 216 |
+
|
| 217 |
+
# 5 Main Results
|
| 218 |
+
|
| 219 |
+
Table 1 shows the model performances on three datasets. In this section, the experiment focuses on the following two questions.
|
| 220 |
+
|
| 221 |
+
# 5.1 Does inaccurate estimation of the number of relations have an impact on clustering?
|
| 222 |
+
|
| 223 |
+
One drawback of most existing two-stage OpenRE methods is that the number of clusters $K$ has to be given in advance, which is impractical in real applications. When $K$ is underestimated, the clustering performance of the SOTA unsupervised method, SelfORE, on the three datasets decreases by an average of $7.13\%$ , while the same metric regarding RoCORE, the SOTA supervised method, is $18.10\%$ . Furthermore, an extremely unbalanced precision-recall value is observed in the $B^3$ metric (much lower precision and higher recall), which indicates that the model tends to mix multiple relations in the same cluster. Obviously, such clustering results will have a negative impact on the relation labeling. In other words, it is difficult to determine which relation a cluster corresponds to. When $K$ is overestimated (due to space limitation, please look at table 4 for results of overestimation), the same relation tends to be clustered into multiple subclusters. Repeated labeling of these subclusters brings a significant increase in human effort. In contrast, the ASCORE dynamically discovers relational clusters through active iteration, breaking the impractical
|
| 224 |
+
|
| 225 |
+
<table><tr><td rowspan="2">Dataset</td><td rowspan="2">Method</td><td rowspan="2">Setting</td><td colspan="3">B3</td><td colspan="3">V-measure</td><td rowspan="2">ARI</td><td colspan="3">Classification</td></tr><tr><td>Prec.</td><td>Rec.</td><td>F1</td><td>Hom.</td><td>Comp.</td><td>F1</td><td>Prec.</td><td>Rec.</td><td>F1</td></tr><tr><td rowspan="7">TACRED</td><td>RW-HAC (Elsalah et al., 2017)</td><td>U</td><td>0.317</td><td>0.668</td><td>0.430</td><td>0.443</td><td>0.668</td><td>0.532</td><td>0.291</td><td>0.244</td><td>0.246</td><td>0.171</td></tr><tr><td>SelfORE (Hu et al., 2020)</td><td>U</td><td>0.339</td><td>0.759</td><td>0.469</td><td>0.468</td><td>0.809</td><td>0.593</td><td>0.412</td><td>0.121</td><td>0.244</td><td>0.160</td></tr><tr><td>SelfORE (Hu et al., 2020)</td><td>U</td><td>0.517</td><td>0.441</td><td>0.476</td><td>0.631</td><td>0.600</td><td>0.615</td><td>0.434</td><td>0.343</td><td>0.396</td><td>0.360</td></tr><tr><td>RSN (Wu et al., 2019)</td><td>P</td><td>0.312</td><td>0.807</td><td>0.451</td><td>0.445</td><td>0.768</td><td>0.563</td><td>0.354</td><td>0.149</td><td>0.118</td><td>0.225</td></tr><tr><td>RoCORE (Zhao et al., 2021)</td><td>P</td><td>0.361</td><td>0.930</td><td>0.520</td><td>0.525</td><td>0.903</td><td>0.664</td><td>0.447</td><td>0.116</td><td>0.247</td><td>0.152</td></tr><tr><td>RoCORE (Zhao et al., 2021)</td><td>P</td><td>0.696</td><td>0.685</td><td>0.690</td><td>0.786</td><td>0.786</td><td>0.787</td><td>0.640</td><td>0.547</td><td>0.594</td><td>0.563</td></tr><tr><td>Ours</td><td>A</td><td>0.742</td><td>0.821</td><td>0.780</td><td>0.807</td><td>0.856</td><td>0.831</td><td>0.781</td><td>0.698</td><td>0.715</td><td>0.699</td></tr><tr><td rowspan="7">FewRel</td><td>RW-HAC (Elsalah et al., 2017)</td><td>U</td><td>0.175</td><td>0.367</td><td>0.237</td><td>0.357</td><td>0.463</td><td>0.403</td><td>0.108</td><td>0.251</td><td>0.264</td><td>0.216</td></tr><tr><td>SelfORE (Hu et al., 
2020)</td><td>U</td><td>0.365</td><td>0.710</td><td>0.482</td><td>0.620</td><td>0.800</td><td>0.699</td><td>0.368</td><td>0.282</td><td>0.442</td><td>0.327</td></tr><tr><td>SelfORE (Hu et al., 2020)</td><td>U</td><td>0.527</td><td>0.552</td><td>0.539</td><td>0.728</td><td>0.736</td><td>0.732</td><td>0.517</td><td>0.604</td><td>0.632</td><td>0.600</td></tr><tr><td>RSN (Wu et al., 2019)</td><td>P</td><td>0.174</td><td>0.640</td><td>0.274</td><td>0.389</td><td>0.659</td><td>0.489</td><td>0.173</td><td>0.112</td><td>0.239</td><td>0.134</td></tr><tr><td>RoCORE (Zhao et al., 2021)</td><td>P</td><td>0.446</td><td>0.901</td><td>0.600</td><td>0.701</td><td>0.922</td><td>0.797</td><td>0.448</td><td>0.320</td><td>0.476</td><td>0.358</td></tr><tr><td>RoCORE (Zhao et al., 2021)</td><td>P</td><td>0.806</td><td>0.843</td><td>0.824</td><td>0.883</td><td>0.896</td><td>0.889</td><td>0.807</td><td>0.827</td><td>0.868</td><td>0.837</td></tr><tr><td>Ours</td><td>A</td><td>0.799</td><td>0.841</td><td>0.820</td><td>0.888</td><td>0.901</td><td>0.894</td><td>0.801</td><td>0.832</td><td>0.862</td><td>0.838</td></tr><tr><td rowspan="7">FewRel-LT</td><td>RW-HAC (Elsalah et al., 2017)</td><td>U</td><td>0.255</td><td>0.322</td><td>0.285</td><td>0.379</td><td>0.421</td><td>0.399</td><td>0.145</td><td>0.190</td><td>0.176</td><td>0.160</td></tr><tr><td>SelfORE (Hu et al., 2020)</td><td>U</td><td>0.266</td><td>0.633</td><td>0.374</td><td>0.466</td><td>0.676</td><td>0.552</td><td>0.290</td><td>0.079</td><td>0.154</td><td>0.099</td></tr><tr><td>SelfORE (Hu et al., 2020)</td><td>U</td><td>0.563</td><td>0.456</td><td>0.504</td><td>0.717</td><td>0.661</td><td>0.687</td><td>0.377</td><td>0.439</td><td>0.526</td><td>0.462</td></tr><tr><td>RSN (Wu et al., 2019)</td><td>P</td><td>0.211</td><td>0.500</td><td>0.297</td><td>0.350</td><td>0.510</td><td>0.415</td><td>0.193</td><td>0.098</td><td>0.173</td><td>0.117</td></tr><tr><td>RoCORE (Zhao et al., 
2021)</td><td>P</td><td>0.382</td><td>0.858</td><td>0.528</td><td>0.571</td><td>0.873</td><td>0.691</td><td>0.400</td><td>0.123</td><td>0.217</td><td>0.151</td></tr><tr><td>RoCORE (Zhao et al., 2021)</td><td>P</td><td>0.662</td><td>0.717</td><td>0.689</td><td>0.800</td><td>0.801</td><td>0.800</td><td>0.581</td><td>0.507</td><td>0.538</td><td>0.517</td></tr><tr><td>Ours</td><td>A</td><td>0.650</td><td>0.845</td><td>0.735</td><td>0.790</td><td>0.885</td><td>0.835</td><td>0.676</td><td>0.530</td><td>0.609</td><td>0.550</td></tr></table>
|
| 226 |
+
|
| 227 |
+
assumption that $K$ is known in advance.
|
| 228 |
+
|
| 229 |
+
# 5.2 Is the actively supervised setting better than the two-stage setting?
|
| 230 |
+
|
| 231 |
+
The two settings are compared, from the following two perspectives.
|
| 232 |
+
|
| 233 |
+
In terms of clustering performance, the actively labeled data can provide valuable supervision signals for clustering learning. Compared with RoCORE, a strong baseline supervised by predefined relations, the proposed method improves the four metrics by an average of $10.3\%$ and $5.2\%$ on long-tail TACRED and FewRel-LT, respectively. Long-tail relation distributions are very common in the real world. On the uniform FewRel dataset, the ASCORE achieves comparable results. It is worth noting that the number of clusters $K$ has to be supplied for RoCORE. When $K$ is unknown, the improvement will be even larger.
|
| 234 |
+
|
| 235 |
+
Regarding labeling costs, both settings are comparable. Note that in the main results, only two instances for each relation are labeled on average. For the two-stage methods, in order to label the relational semantics of a cluster, the annotator has to observe at least one sample. Obviously, the ASCORE does not lead to a significant increase in human efforts.
|
| 236 |
+
|
| 237 |
+
Table 1: Main results on three relation extraction datasets. The paired rows for each baseline represent the cases where the number of relations is known and unknown, respectively (please look at appendix A.2 for more details). U, P and A respectively indicate Unsupervised setting, supervised by Predefined relation setting and Actively supervised setting. The proposed method outperforms the SOTA method and does not need to specify the number of clusters in advance.
|
| 238 |
+
|
| 239 |
+
<table><tr><td>Dataset</td><td>RAND.</td><td>CONF.</td><td>MARG.</td><td>ENTRO.</td><td>GRAD.</td><td>OURS</td></tr><tr><td>TACRED</td><td>15</td><td>13</td><td>18</td><td>16</td><td>17</td><td>18</td></tr><tr><td>FewRel</td><td>40</td><td>34</td><td>40</td><td>37</td><td>40</td><td>40</td></tr><tr><td>FewRel-LT</td><td>27</td><td>19</td><td>25</td><td>26</td><td>30</td><td>35</td></tr><tr><td>ALL</td><td>82(↓ 5.7%)</td><td>66(↓ 24.1%)</td><td>83(↓ 4.6%)</td><td>79(↓ 9.2%)</td><td>87(0%)</td><td>92(↑ 5.8%)</td></tr></table>
|
| 240 |
+
|
| 241 |
+
Table 2: The number of relations discovered by the labeling strategies. (GRAD. as a reference point.)
|
| 242 |
+
|
| 243 |
+
# 6 Analysis and Discussions
|
| 244 |
+
|
| 245 |
+
# 6.1 Analysis on Labeling Strategy
|
| 246 |
+
|
| 247 |
+
Main results (sec. 5) have shown the advantages of the proposed actively supervised clustering setting over the two-stage setting. However, some serious readers may still think the comparison across settings is unfair. To further reduce readers' concerns and show the effectiveness of our labeling strategy, we combine the actively supervised clustering settings with the various active labeling strategies and compare them in terms of relation discovery and clustering performance. Note that the number of key points selected by each strategy is the same (two points per relation on average). The results are shown in tab.2 and tab. 3. It can be seen from tab. 2 that the proposed labeling strategy finds the most relations. Different from the classical strategies that focus only on improving the recognition of relations that have been discovered, the proposed strategy appropriately explores new relations by distance regularization,
|
| 248 |
+
|
| 249 |
+
<table><tr><td>Dataset</td><td>Method</td><td>B3</td><td>V-measure</td><td>ARI</td><td>Classification</td></tr><tr><td rowspan="6">TACRED</td><td>RANDOM</td><td>0.737</td><td>0.800</td><td>0.662</td><td>0.464</td></tr><tr><td>CONFIDENCE</td><td>0.671</td><td>0.752</td><td>0.598</td><td>0.408</td></tr><tr><td>MARGIN</td><td>0.709</td><td>0.787</td><td>0.628</td><td>0.524</td></tr><tr><td>ENTROPY</td><td>0.702</td><td>0.790</td><td>0.633</td><td>0.502</td></tr><tr><td>GRADIENT</td><td>0.767</td><td>0.831</td><td>0.725</td><td>0.670</td></tr><tr><td>Ours</td><td>0.780</td><td>0.851</td><td>0.781</td><td>0.699</td></tr><tr><td rowspan="6">FewRel</td><td>RANDOM</td><td>0.808</td><td>0.882</td><td>0.787</td><td>0.813</td></tr><tr><td>CONFIDENCE</td><td>0.752</td><td>0.851</td><td>0.660</td><td>0.701</td></tr><tr><td>MARGIN</td><td>0.817</td><td>0.881</td><td>0.796</td><td>0.831</td></tr><tr><td>ENTROPY</td><td>0.781</td><td>0.819</td><td>0.779</td><td>0.743</td></tr><tr><td>GRADIENT</td><td>0.814</td><td>0.884</td><td>0.790</td><td>0.827</td></tr><tr><td>Ours</td><td>0.820</td><td>0.894</td><td>0.801</td><td>0.838</td></tr><tr><td rowspan="6">FewRel-LT</td><td>RANDOM</td><td>0.716</td><td>0.814</td><td>0.670</td><td>0.486</td></tr><tr><td>CONFIDENCE</td><td>0.346</td><td>0.514</td><td>0.217</td><td>0.336</td></tr><tr><td>MARGIN</td><td>0.696</td><td>0.806</td><td>0.647</td><td>0.481</td></tr><tr><td>ENTROPY</td><td>0.721</td><td>0.824</td><td>0.664</td><td>0.481</td></tr><tr><td>GRADIENT</td><td>0.719</td><td>0.797</td><td>0.649</td><td>0.498</td></tr><tr><td>Ours</td><td>0.735</td><td>0.835</td><td>0.676</td><td>0.550</td></tr></table>
|
| 250 |
+
|
| 251 |
+
Table 3: Comparison results of labeling strategies on three datasets.
|
| 252 |
+
|
| 253 |
+
which is particularly beneficial for long-tail relation discovery in real applications. Additionally, tab. 3 shows that this strategy is also the best in terms of clustering performance. Benefitting from assigning reasonable loss functions to pseudo labels with different reliability, more pseudo labels can be used for learning without significantly increasing the risk of over-fitting noise.
|
| 254 |
+
|
| 255 |
+
# 6.2 The Effect of Numbers of Actively Labeled Instances
|
| 256 |
+
|
| 257 |
+
In order to more comprehensively evaluate the labeling strategies, experiments are conducted to compare these labeling strategies by varying the number of actively labeled instances, $N^*$ . Figure 3 shows the effect of $N^*$ . Surprisingly, it is found that the random strategy is a very competitive baseline that beats most of the classical labeling strategies given different $N^*$ . This suggests that classical labeling strategies may be better at tasks with known and fixed categories. Although the proposed strategy consistently outperforms all baselines, it is obvious that it has not been fully optimized. It is the authors' belief that it is sufficient to serve as a reasonable baseline for the actively supervised clustering setting and to provide some useful guidance for future research in this field. Additionally, with the increase of $N^*$ , the performance of the model is improved, but the growth rate gradually slows. It means that the cost-effectiveness of human effort gradually decreases. Therefore, for users with limited budgets, it is also a good choice to discover the primary relations through only a few queries.
|
| 258 |
+
|
| 259 |
+

|
| 260 |
+
Figure 3: Performance with different active labeled instances.
|
| 261 |
+
|
| 262 |
+

|
| 263 |
+
|
| 264 |
+

|
| 265 |
+
Figure 4: Performance with different hyperparameter settings.
|
| 266 |
+
|
| 267 |
+

|
| 268 |
+
|
| 269 |
+
# 6.3 Hyperparameter Analysis
|
| 270 |
+
|
| 271 |
+
In this section, we study the effects of the reliability thresholds $r_h$ and $r_m$ on optimization. Their values are given by the values of the elements ranked $\theta_{ce}\%$ and $\theta_{bce}\%$ from small to large. From Figure 4 it is possible to see that: (1) When $\theta_{ce}$ and $\theta_{bce}$ gradually increase from a small value, more training data are used for model optimization, and the performance of the model gradually improves. (2) When the value exceeds a certain threshold, further increasing $\theta_{ce}$ and $\theta_{bce}$ will introduce more errors into the optimization, which degrades the performance of the model. (3) Compared with $\mathcal{L}_{bce}$ , the $\mathcal{L}_{ce}$ loss will make the model converge faster, so the optimal threshold of $\mathcal{L}_{ce}$ should be less than that of $\mathcal{L}_{bce}$ to prevent the overfitting of wrong labels.
|
| 272 |
+
|
| 273 |
+
# 7 Conclusions
|
| 274 |
+
|
| 275 |
+
In this work, we present a new setting, named actively supervised clustering for OpenRE, which provides the necessary guidance for clustering without a significant increase in human efforts. Along with this setting, a labeling strategy tailored for clustering is proposed, maximizing the clustering performance while discovering as many relations as possible. Different loss functions are assigned to pseudo labels with different reliability, which mitigate the risk of over-fitting to noise in pseudo labels. Experimental results show that this method significantly outperforms the existing two-stage methods for OpenRE.
|
| 276 |
+
|
| 277 |
+
# Limitations
|
| 278 |
+
|
| 279 |
+
Considering that the golden labels of all instances have been given in the datasets, we directly use these labels as manual labels without performing the manual labeling process. The practice implicitly assumes that all the manual labels are correct. However, with the increase of labeling scale, problems such as (1) inconsistent labeling granularity across annotators and (2) noise in manual labels gradually emerge. How to effectively improve the labeling quality and the robustness of the clustering model are worthy of attention.
|
| 280 |
+
|
| 281 |
+
# Acknowledgements
|
| 282 |
+
|
| 283 |
+
The authors wish to thank the anonymous reviewers for their helpful comments. This work was partially funded by National Natural Science Foundation of China (No.62076069,62206057,61976056), Shanghai Rising-Star Program (23QA1400200), and Natural Science Foundation of Shanghai (23ZR1403500).
|
| 284 |
+
|
| 285 |
+
# References
|
| 286 |
+
|
| 287 |
+
Jordan T Ash, Chicheng Zhang, Akshay Krishnamurthy, John Langford, and Alekh Agarwal. 2019a. Deep batch active learning by diverse, uncertain gradient lower bounds. arXiv preprint arXiv:1906.03671.
|
| 288 |
+
Jordan T. Ash, Chicheng Zhang, Akshay Krishnamurthy, John Langford, and Alekh Agarwal. 2019b. Deep batch active learning by diverse, uncertain gradient lower bounds. CoRR, abs/1906.03671.
|
| 289 |
+
Mathilde Caron, Piotr Bojanowski, Armand Joulin, and Matthijs Douze. 2018. Deep clustering for unsupervised learning of visual features. In Proceedings of the European conference on computer vision (ECCV), pages 132-149.
|
| 290 |
+
Kevin Clark, Urvashi Khandelwal, Omer Levy, and Christopher D. Manning. 2019. What does BERT look at? an analysis of BERT's attention. In Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 276-286, Florence, Italy. Association for Computational Linguistics.
|
| 291 |
+
Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. BERT: pre-training of deep bidirectional transformers for language understanding. CoRR, abs/1810.04805.
|
| 292 |
+
Hady Elsahar, Elena Demidova, Simon Gottschalk, Christophe Gravier, and Frederique Laforest. 2017. Unsupervised open relation extraction. In European Semantic Web Conference, pages 12-16. Springer.
|
| 293 |
+
|
| 294 |
+
Yarin Gal, Riashat Islam, and Zoubin Ghahramani. 2017. Deep bayesian active learning with image data. In International Conference on Machine Learning, pages 1183-1192. PMLR.
|
| 295 |
+
Yoav Goldberg. 2019. Assessing bert's syntactic abilities. ArXiv, abs/1901.05287.
|
| 296 |
+
Xu Han, Hao Zhu, Pengfei Yu, Ziyun Wang, Yuan Yao, Zhiyuan Liu, and Maosong Sun. 2018. FewRel: A large-scale supervised few-shot relation classification dataset with state-of-the-art evaluation. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 4803-4809, Brussels, Belgium. Association for Computational Linguistics.
|
| 297 |
+
Xuming Hu, Lijie Wen, Yusong Xu, Chenwei Zhang, and Philip Yu. 2020. SelfORE: Self-supervised relational feature learning for open relation extraction. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 3673-3682, Online. Association for Computational Linguistics.
|
| 298 |
+
Ganesh Jawahar, Benoit Sagot, and Djamé Seddah. 2019. What does BERT learn about the structure of language? In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3651-3657, Florence, Italy. Association for Computational Linguistics.
|
| 299 |
+
Heng Ji and Ralph Grishman. 2011. Knowledge base population: Successful approaches and challenges. In Proceedings of the 49th annual meeting of the association for computational linguistics: Human language technologies, pages 1148-1158.
|
| 300 |
+
Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.
|
| 301 |
+
Diego Marcheggiani and Ivan Titov. 2016. Discrete-State Variational Autoencoders for Joint Discovery and Factorization of Relations. Transactions of the Association for Computational Linguistics, 4:231-244.
|
| 302 |
+
Dan Roth and Kevin Small. 2006. Margin-based active learning for structured output spaces. In Machine Learning: ECML 2006, pages 413-424, Berlin, Heidelberg. Springer Berlin Heidelberg.
|
| 303 |
+
Michael Schlichtkrull, Thomas N. Kipf, Peter Bloem, Rianne van den Berg, Ivan Titov, and Max Welling. 2018. Modeling relational data with graph convolutional networks. In *The Semantic Web*, pages 593-607, Cham. Springer International Publishing.
|
| 304 |
+
Ozan Sener and Silvio Savarese. 2018. Active learning for convolutional neural networks: A core-set approach.
|
| 305 |
+
Livio Baldini Soares, Nicholas FitzGerald, Jeffrey Ling, and Tom Kwiatkowski. 2019. Matching the blanks: Distributional similarity for relation learning. arXiv preprint arXiv:1906.03158.
|
| 306 |
+
|
| 307 |
+
Simon Tong and Daphne Koller. 2001. Support vector machine active learning with applications to text classification. Journal of machine learning research, 2(Nov):45-66.
|
| 308 |
+
|
| 309 |
+
Dan Wang and Yi Shang. 2014a. A new active labeling method for deep learning. In 2014 International Joint Conference on Neural Networks (IJCNN), pages 112-119.
|
| 310 |
+
|
| 311 |
+
Dan Wang and Yi Shang. 2014b. A new active labeling method for deep learning. In 2014 International joint conference on neural networks (IJCNN), pages 112-119. IEEE.
|
| 312 |
+
|
| 313 |
+
Ruidong Wu, Yuan Yao, Xu Han, Ruobing Xie, Zhiyuan Liu, Fen Lin, Leyu Lin, and Maosong Sun. 2019. Open relation extraction: Relational knowledge transfer from supervised data to unsupervised data. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 219-228.
|
| 314 |
+
|
| 315 |
+
Limin Yao, Aria Haghighi, Sebastian Riedel, and Andrew McCallum. 2011. Structured relation discovery using generative models. In Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing, pages 1456-1466, Edinburgh, Scotland, UK. Association for Computational Linguistics.
|
| 316 |
+
|
| 317 |
+
Mo Yu, Wenpeng Yin, Kazi Saidul Hasan, Cicero dos Santos, Bing Xiang, and Bowen Zhou. 2017. Improved neural relation detection for knowledge base question answering. arXiv preprint arXiv:1704.06194.
|
| 318 |
+
|
| 319 |
+
Yuhao Zhang, Victor Zhong, Danqi Chen, Gabor Angeli, and Christopher D. Manning. 2017. Position-aware attention and supervised data improve slot filling. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 35-45, Copenhagen, Denmark. Association for Computational Linguistics.
|
| 320 |
+
|
| 321 |
+
Jun Zhao, Tao Gui, Qi Zhang, and Yaqian Zhou. 2021. A relation-oriented clustering method for open relation extraction. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 219-228.
|
| 322 |
+
|
| 323 |
+
Jun Zhao, Xin Zhao, WenYu Zhan, Tao Gui, Qi Zhang, Liang Qiao, Zhanzhan Cheng, and Shiliang Pu. 2022. Read extensively, focus smartly: A cross-document semantic enhancement method for visual documents NER. In Proceedings of the 29th International Conference on Computational Linguistics, pages 2034-2043, Gyeongju, Republic of Korea. International Committee on Computational Linguistics.
|
| 324 |
+
|
| 325 |
+
# A Appendix
|
| 326 |
+
|
| 327 |
+
# A.1 Compared Active Labeling Strategy
|
| 328 |
+
|
| 329 |
+
To show the superiority of the proposed labeling strategy, the actively supervised clustering is combined with the following classical active learning strategies for comparison. RANDOM The naive baseline of randomly selecting $k$ samples to query labels.
|
| 330 |
+
|
| 331 |
+
CONFIDENCE (Wang and Shang, 2014b) An uncertainty-based active learning algorithm that selects the $k$ samples with the smallest predicted class probability $\max \{f_{\theta}(x)_i\}_{i = 1,\dots ,C}$ .
|
| 332 |
+
|
| 333 |
+
MARGIN (Roth and Small, 2006) An uncertainty-based active learning algorithm that selects the bottom $k$ sorted according to the example's multiclass margin, defined as $f_{\theta}(x)_{\hat{y}} - f_{\theta}(x)_{y'}$ , where $\hat{y}$ and $y'$ are the indices of the largest and second largest entries of $f_{\theta}(x)$ .
|
| 334 |
+
|
| 335 |
+
ENTROPY(Wang and Shang, 2014a) An uncertainty-based active learning algorithm that selects the top $k$ samples according to the entropy of the sample's class distribution.
|
| 336 |
+
|
| 337 |
+
GRADIENT(Ash et al., 2019b) A loss based active learning algorithm. The uncertainty is measured as the gradient magnitude with respect to parameters in the output layer.
|
| 338 |
+
|
| 339 |
+
# A.2 Additional Results
|
| 340 |
+
|
| 341 |
+
In this section, more detailed experimental settings and experimental results are given. Specifically, in the TACRED dataset, 21 relations are held out as open relations to be discovered. In the underestimation (resp. overestimation) setting, we assume that the number of clusters is 10 (resp. 40). In the FewRel and FewRel-LT datasets, 40 relations are held out as open relations and we assume that the number of clusters is 20 (resp. 80) for the underestimation (resp. overestimation) setting. The results of the two settings are listed in tab. 4. When $K$ is underestimated, it is observed that the precision of $B^3$ is far lower than recall, which indicates that the model tends to mix multiple relations in the same cluster. When $K$ is overestimated, the recall is far lower than precision, which indicates that the same relation tends to be clustered into multiple subclusters. Although the $F_1$ of the $B^3$ metric seems to be tolerable, such imbalanced clustering assignments cause great difficulties in relation labeling. If a cluster contains more than one relation, labeling the cluster as any relation will lead to the misidentification of other relations. If
|
| 342 |
+
|
| 343 |
+
<table><tr><td rowspan="2">Dataset</td><td rowspan="2">Method</td><td rowspan="2">Setting</td><td colspan="3">B3</td><td colspan="3">V-measure</td><td rowspan="2">ARI</td><td colspan="3">Classification</td></tr><tr><td>Prec.</td><td>Rec.</td><td>F1</td><td>Hom.</td><td>Comp.</td><td>F1</td><td>Prec.</td><td>Rec.</td><td>F1</td></tr><tr><td rowspan="9">TACRED</td><td>RW-HAC (Elsahar et al., 2017)</td><td>U</td><td>0.317</td><td>0.668</td><td>0.430</td><td>0.443</td><td>0.668</td><td>0.532</td><td>0.291</td><td>0.244</td><td>0.246</td><td>0.171</td></tr><tr><td>SelfORE - (Hu et al., 2020)</td><td>U</td><td>0.339</td><td>0.759</td><td>0.469</td><td>0.468</td><td>0.809</td><td>0.593</td><td>0.412</td><td>0.121</td><td>0.244</td><td>0.160</td></tr><tr><td>SelfORE + (Hu et al., 2020)</td><td>U</td><td>0.575</td><td>0.251</td><td>0.349</td><td>0.680</td><td>0.522</td><td>0.591</td><td>0.290</td><td>0.469</td><td>0.481</td><td>0.231</td></tr><tr><td>SelfORE (Hu et al., 2020)</td><td>U</td><td>0.517</td><td>0.441</td><td>0.476</td><td>0.631</td><td>0.600</td><td>0.615</td><td>0.434</td><td>0.343</td><td>0.396</td><td>0.360</td></tr><tr><td>RSN (Wu et al., 2019)</td><td>P</td><td>0.312</td><td>0.807</td><td>0.451</td><td>0.445</td><td>0.768</td><td>0.563</td><td>0.354</td><td>0.149</td><td>0.118</td><td>0.225</td></tr><tr><td>RoCORE (Zhao et al., 2021)</td><td>P</td><td>0.361</td><td>0.930</td><td>0.520</td><td>0.525</td><td>0.903</td><td>0.664</td><td>0.447</td><td>0.116</td><td>0.247</td><td>0.152</td></tr><tr><td>RoCORE + (Zhao et al., 2021)</td><td>P</td><td>0.714</td><td>0.530</td><td>0.608</td><td>0.805</td><td>0.698</td><td>0.748</td><td>0.552</td><td>0.612</td><td>0.649</td><td>0.311</td></tr><tr><td>RoCORE (Zhao et al., 2021)</td><td>P</td><td>0.696</td><td>0.685</td><td>0.690</td><td>0.786</td><td>0.786</td><td>0.787</td><td>0.640</td><td>0.547</td><td>0.594</td><td>0.563</td></tr><tr><td>Ours (Wu et al., 
2017)</td><td>A</td><td>0.742</td><td>0.821</td><td>0.780</td><td>0.807</td><td>0.856</td><td>0.831</td><td>0.781</td><td>0.698</td><td>0.715</td><td>0.699</td></tr><tr><td rowspan="9">FewRel</td><td>RW-HAC (Elsahar et al., 2017)</td><td>U</td><td>0.175</td><td>0.367</td><td>0.237</td><td>0.357</td><td>0.463</td><td>0.403</td><td>0.108</td><td>0.251</td><td>0.264</td><td>0.216</td></tr><tr><td>SelfORE (Hu et al., 2020)</td><td>U</td><td>0.365</td><td>0.710</td><td>0.482</td><td>0.620</td><td>0.800</td><td>0.699</td><td>0.368</td><td>0.282</td><td>0.442</td><td>0.327</td></tr><tr><td>SelfORE + (Hu et al., 2020)</td><td>U</td><td>0.639</td><td>0.400</td><td>0.492</td><td>0.793</td><td>0.681</td><td>0.733</td><td>0.492</td><td>0.733</td><td>0.744</td><td>0.365</td></tr><tr><td>SelfORE (Hu et al., 2020)</td><td>U</td><td>0.527</td><td>0.552</td><td>0.539</td><td>0.728</td><td>0.736</td><td>0.732</td><td>0.517</td><td>0.604</td><td>0.632</td><td>0.600</td></tr><tr><td>RSN (Wu et al., 2019)</td><td>P</td><td>0.174</td><td>0.640</td><td>0.274</td><td>0.389</td><td>0.659</td><td>0.489</td><td>0.173</td><td>0.112</td><td>0.239</td><td>0.134</td></tr><tr><td>RoCORE (Zhao et al., 2021)</td><td>P</td><td>0.446</td><td>0.901</td><td>0.600</td><td>0.701</td><td>0.922</td><td>0.797</td><td>0.448</td><td>0.320</td><td>0.476</td><td>0.358</td></tr><tr><td>RoCORE + (Zhao et al., 2021)</td><td>P</td><td>0.824</td><td>0.656</td><td>0.730</td><td>0.896</td><td>0.814</td><td>0.853</td><td>0.739</td><td>0.882</td><td>0.881</td><td>0.439</td></tr><tr><td>RoCORE (Zhao et al., 2021)</td><td>P</td><td>0.806</td><td>0.843</td><td>0.824</td><td>0.883</td><td>0.896</td><td>0.889</td><td>0.807</td><td>0.827</td><td>0.868</td><td>0.837</td></tr><tr><td>Ours (Wu et al., 2017)</td><td>A</td><td>0.799</td><td>0.841</td><td>0.820</td><td>0.888</td><td>0.901</td><td>0.894</td><td>0.801</td><td>0.832</td><td>0.862</td><td>0.838</td></tr><tr><td rowspan="9">FewRel-LT</td><td>RW-HAC (Elsahar et al., 
2017)</td><td>U</td><td>0.255</td><td>0.322</td><td>0.285</td><td>0.379</td><td>0.421</td><td>0.399</td><td>0.145</td><td>0.190</td><td>0.176</td><td>0.160</td></tr><tr><td>SelfORE (Hu et al., 2020)</td><td>U</td><td>0.266</td><td>0.633</td><td>0.374</td><td>0.466</td><td>0.676</td><td>0.552</td><td>0.290</td><td>0.079</td><td>0.154</td><td>0.099</td></tr><tr><td>SelfORE + (Hu et al., 2020)</td><td>U</td><td>0.641</td><td>0.298</td><td>0.407</td><td>0.771</td><td>0.589</td><td>0.668</td><td>0.370</td><td>0.589</td><td>0.647</td><td>0.305</td></tr><tr><td>SelfORE (Hu et al., 2020)</td><td>U</td><td>0.563</td><td>0.456</td><td>0.504</td><td>0.717</td><td>0.661</td><td>0.687</td><td>0.377</td><td>0.439</td><td>0.526</td><td>0.462</td></tr><tr><td>RSN (Wu et al., 2019)</td><td>P</td><td>0.211</td><td>0.500</td><td>0.297</td><td>0.350</td><td>0.510</td><td>0.415</td><td>0.193</td><td>0.098</td><td>0.173</td><td>0.117</td></tr><tr><td>RoCORE (Zhao et al., 2021)</td><td>P</td><td>0.382</td><td>0.858</td><td>0.528</td><td>0.571</td><td>0.873</td><td>0.691</td><td>0.400</td><td>0.123</td><td>0.217</td><td>0.151</td></tr><tr><td>RoCORE + (Zhao et al., 2021)</td><td>P</td><td>0.714</td><td>0.530</td><td>0.608</td><td>0.805</td><td>0.698</td><td>0.748</td><td>0.552</td><td>0.612</td><td>0.649</td><td>0.311</td></tr><tr><td>RoCORE (Zhao et al., 2021)</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Ours (Wu et al., 2019)</td><td>A</td><td>0.662</td><td>0.717</td><td>0.689</td><td>0.800</td><td>0.801</td><td>0.800</td><td>0.581</td><td>0.507</td><td>0.538</td><td>0.517</td></tr></table>
|
| 344 |
+
|
| 345 |
+
Table 4: Main results on three relation extraction datasets. The markers indicate whether the number of relation types in the unlabeled data is correctly or incorrectly estimated, respectively. In addition, - and + denote underestimation and overestimation, respectively.
|
| 346 |
+
|
| 347 |
+
a relation is clustered into multiple sub-clusters, the annotators have to label the same relation repeatedly, which leads to a significant increase in labeling costs.
|
| 348 |
+
|
| 349 |
+
A For every submission:
|
| 350 |
+
|
| 351 |
+
A1. Did you describe the limitations of your work? we discuss the limitations in the Limitations section.
|
| 352 |
+
A2. Did you discuss any potential risks of your work? we discuss the risks in the Limitations section.
|
| 353 |
+
A3. Do the abstract and introduction summarize the paper's main claims? In the abstract and introduction section.
|
| 354 |
+
A4. Have you used AI writing assistants when working on this paper? Left blank.
|
| 355 |
+
|
| 356 |
+
B Did you use or create scientific artifacts?
|
| 357 |
+
|
| 358 |
+
section 4
|
| 359 |
+
|
| 360 |
+
B1. Did you cite the creators of artifacts you used? section 4
|
| 361 |
+
B2. Did you discuss the license or terms for use and / or distribution of any artifacts? section 4
|
| 362 |
+
B3. Did you discuss if your use of existing artifact(s) was consistent with their intended use, provided that it was specified? For the artifacts you create, do you specify intended use and whether that is compatible with the original access conditions (in particular, derivatives of data accessed for research purposes should not be used outside of research contexts)? Not applicable. Left blank.
|
| 363 |
+
B4. Did you discuss the steps taken to check whether the data that was collected / used contains any information that names or uniquely identifies individual people or offensive content, and the steps taken to protect / anonymize it? Not applicable. Left blank.
|
| 364 |
+
B5. Did you provide documentation of the artifacts, e.g., coverage of domains, languages, and linguistic phenomena, demographic groups represented, etc.? Not applicable. Left blank.
|
| 365 |
+
B6. Did you report relevant statistics like the number of examples, details of train / test / dev splits, etc. for the data that you used / created? Even for commonly-used benchmark datasets, include the number of examples in train / validation / test splits, as these provide necessary context for a reader to understand experimental results. For example, small differences in accuracy on large test sets may be significant, while on small test sets they may not be. Left blank.
|
| 366 |
+
|
| 367 |
+
C Did you run computational experiments?
|
| 368 |
+
|
| 369 |
+
section 4
|
| 370 |
+
|
| 371 |
+
C1. Did you report the number of parameters in the models used, the total computational budget (e.g., GPU hours), and computing infrastructure used? section 4
|
| 372 |
+
|
| 373 |
+
The Responsible NLP Checklist used at ACL 2023 is adopted from NAACL 2022, with the addition of a question on AI writing assistance.
|
| 374 |
+
|
| 375 |
+
C2. Did you discuss the experimental setup, including hyperparameter search and best-found hyperparameter values? section 4
|
| 376 |
+
C3. Did you report descriptive statistics about your results (e.g., error bars around results, summary statistics from sets of experiments), and is it transparent whether you are reporting the max, mean, etc. or just a single run? section 4,5
|
| 377 |
+
C4. If you used existing packages (e.g., for preprocessing, for normalization, or for evaluation), did you report the implementation, model, and parameter settings used (e.g., NLTK, Spacy, ROUGE, etc.)? section 4
|
| 378 |
+
|
| 379 |
+
# D Did you use human annotators (e.g., crowdworkers) or research with human participants?
|
| 380 |
+
|
| 381 |
+
Left blank.
|
| 382 |
+
|
| 383 |
+
D1. Did you report the full text of instructions given to participants, including e.g., screenshots, disclaimers of any risks to participants or annotators, etc.? No response.
|
| 384 |
+
D2. Did you report information about how you recruited (e.g., crowdsourcing platform, students) and paid participants, and discuss if such payment is adequate given the participants' demographic (e.g., country of residence)? No response.
|
| 385 |
+
D3. Did you discuss whether and how consent was obtained from people whose data you're using/curating? For example, if you collected data via crowdsourcing, did your instructions to crowdworkers explain how the data would be used? No response.
|
| 386 |
+
D4. Was the data collection protocol approved (or determined exempt) by an ethics review board? No response.
|
| 387 |
+
D5. Did you report the basic demographic and geographic characteristics of the annotator population that is the source of the data? No response.
|
activelysupervisedclusteringforopenrelationextraction/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:86ab1e0b371b396bae54ca15bc074df9090255469b6191ac148aaeae7ed06b72
|
| 3 |
+
size 676219
|
activelysupervisedclusteringforopenrelationextraction/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d13fe9a3758e6858abac1cf4a9676006b7e246b8c07fb5904c6946a980a92dc6
|
| 3 |
+
size 468937
|
adaptiveandpersonalizedexercisegenerationforonlinelanguagelearning/1e8e83d4-ba76-454a-bbfe-afb2c136470e_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:33eae76a20c5a50efb4bffea77430baec13303e096ccdcdefbc0856ffb33fd84
|
| 3 |
+
size 103227
|
adaptiveandpersonalizedexercisegenerationforonlinelanguagelearning/1e8e83d4-ba76-454a-bbfe-afb2c136470e_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b1281f432ea2790bc02461006d7720b31116a95a1dcec6cc48c699d7e25d4e90
|
| 3 |
+
size 126533
|
adaptiveandpersonalizedexercisegenerationforonlinelanguagelearning/1e8e83d4-ba76-454a-bbfe-afb2c136470e_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:00d1cb163aa828cae3374af2c1cfd929affb84c25c650fa8f69cfc11eb9d13a9
|
| 3 |
+
size 3235614
|
adaptiveandpersonalizedexercisegenerationforonlinelanguagelearning/full.md
ADDED
|
@@ -0,0 +1,440 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Adaptive and Personalized Exercise Generation for Online Language Learning
|
| 2 |
+
|
| 3 |
+
Peng Cui Mrinmaya Sachan
|
| 4 |
+
|
| 5 |
+
Department of Computer Science, ETH Zürich
|
| 6 |
+
|
| 7 |
+
{peng.cui, mrinmaya.sachan}@inf.ethz.ch
|
| 8 |
+
|
| 9 |
+
# Abstract
|
| 10 |
+
|
| 11 |
+
Adaptive learning aims to provide customized educational activities (e.g., exercises) to address individual learning needs. However, manual construction and delivery of such activities is a laborious process. Thus, in this paper, we study a novel task of adaptive and personalized exercise generation for online language learning. To this end, we combine a knowledge tracing model that estimates each student's evolving knowledge states from their learning history and a controlled text generation model that generates exercise sentences based on the student's current estimated knowledge state and instructor requirements of desired properties (e.g., domain knowledge and difficulty). We train and evaluate our model on real-world learner interaction data from Duolingo and demonstrate that LMs guided by student states can generate superior exercises. Then, we discuss the potential use of our model in educational applications using various simulations. These simulations show that our model can adapt to students' individual abilities and can facilitate their learning efficiency by personalizing learning sequences. $^{1}$
|
| 12 |
+
|
| 13 |
+
# 1 Introduction
|
| 14 |
+
|
| 15 |
+
Adaptive learning technologies which continuously monitor student progress to dynamically adjust the level or type of learning materials based on the individual's abilities are quite popular (Becker et al., 2018). Empirical studies have shown various benefits of adaptive learning, such as improved student learning outcomes (Bailey et al., 2018; Holthaus et al., 2019), lower dropout rates (Daines et al., 2016), and increased instructor satisfaction (Yarnall et al., 2016). Despite their effectiveness, designing adaptive systems is challenging as it usually involves planning a series of exercises that is personalized and adaptive to each student, which requires
|
| 16 |
+
|
| 17 |
+

|
| 18 |
+
Figure 1: We first assess student knowledge states from their learning history and then generate exercises based on estimated states and instructor control of desired properties including domain knowledge (vocabulary) and difficulty levels (expected error numbers).
|
| 19 |
+
|
| 20 |
+
diverse exercise planning as well as an understanding of the student learning process.
|
| 21 |
+
|
| 22 |
+
On the other hand, powered by advances in neural NLP, works have been done for automatically generating text-based exercises or questions for educational purposes in second language learning (Heck and Meurers, 2022; Perez and Cuadros, 2017), mathematics (Polozov et al., 2015; Zhou and Huang, 2019; Wang et al., 2021), and computer science (Susanti et al., 2017). Nevertheless, how to apply these approaches in adaptive systems remains an open question. First, existing methods largely rely on pre-defined question templates or specified information sources (e.g., a passage), thereby resulting in limited knowledge coverage and low question difficulty control, and as a consequence, do not meet each student's individual and nuanced learning needs. Besides, they are usually designed to generate standalone exercises, whereas adaptive learning systems usually require a continuous supply of exercises. Another related line of research studies exercise recommendation to customize learning content based on individual ca
|
| 23 |
+
|
| 24 |
+
pabilities and goals (Wu et al., 2020; Huang et al., 2022). However, these systems are limited by the diversity of the exercise pool.
|
| 25 |
+
|
| 26 |
+
To address the above limitations, we study the task of exercise generation in the context of adaptive learning, where we hypothesize that a student's dynamic knowledge state holds the key to generating adaptive and personalized exercises. Specifically, we ground our study in the domain of language learning to create exercise sentences for translation, of which Figure 1 illustrates the overall process. We start with an assumption about the dynamics between exercise difficulty, vocabulary, and a student's knowledge state (§ 3). Then, we propose an approach (§ 4) that marries knowledge tracing (KT; Corbett and Anderson (1994)), a technique for estimating students' mastery states of knowledge components from their learning history, with a controlled text generation model that generates the next exercise based on instructor requirements, such as specified domain knowledge and target difficulty. We further explore various strategies to adapt the generation of exercises based on students' changing knowledge states. In doing this, our model not only supports personalized generation where the instructor (or the system) can express some desired properties of the generated exercises but is also adaptive to each student's learning progress.
|
| 27 |
+
|
| 28 |
+
We conduct extensive experiments on real-world student learning data from Duolingo², a popular online language learning platform that offers structured and individualized learning content. Our results (§ 5) show that pre-trained LMs can help KT assess student language knowledge while student states estimated by KT can guide LMs to generate adaptive and personalized exercises. We further discuss the potential use of our model in educational applications with simulations. The simulations show that our model can dynamically adjust exercise difficulty to match individual learning progress and facilitate their learning efficiency by customizing exercise sequences.
|
| 29 |
+
|
| 30 |
+
# 2 Related Work
|
| 31 |
+
|
| 32 |
+
Adaptive Learning technologies that dynamically monitor student progress and adjust the course content based on an individual's abilities have demonstrated various benefits in education (Becker et al., 2018). Such systems usually consist of three core components: (1) a domain model which refers to
|
| 33 |
+
|
| 34 |
+
the content and structure of the topic to be taught, (2) a learner model which repeatedly measures and updates learner characteristics, and (3) an adaption model which combines information from the domain and learner model to offer adaptive instructions (Vagale and Niedrite, 2012; Imhof et al., 2020). In this study, we build the learner model based on the KT technique and combine the domain and adaption model into an LM which generates learning content adaptively based on user features captured by the learner model.
|
| 35 |
+
|
| 36 |
+
Knowledge Tracing (Corbett and Anderson, 1994) is the technique to estimate students' knowledge mastery s from their practiced exercises (e) and responses (r):
|
| 37 |
+
|
| 38 |
+
$$
|
| 39 |
+
\mathbf {s} _ {\mathbf {t} + \mathbf {1}} = f _ {K T} ((\mathrm {e} _ {1}, \mathrm {r} _ {1}), (\mathrm {e} _ {2}, \mathrm {r} _ {2}), \dots , (\mathrm {e} _ {\mathrm {t}}, \mathrm {r} _ {\mathrm {t}})). \tag {1}
|
| 40 |
+
$$
|
| 41 |
+
|
| 42 |
+
Early KT approaches model $f_{KT}$ as variants of logistic regression, such as Item Response Theory (IRT) and Additive Factor Model (AFM) (Cen et al., 2008), or probabilistic models such as Bayesian Knowledge Tracing (Corbett and Anderson, 1994) and its variants (Yudelson et al., 2013; Käser et al., 2017). These approaches heavily rely on their assumptions of the learning process which are often incomplete. In recent years, neural networks have become the dominant method in this area. Piech et al. (2015) proposed the first Deep Knowledge Tracing model based on Recurrent Neural Networks. After that, various architectures have been applied to model different characteristics of learning, such as self-attention (Pandey and Karypis, 2019; Shin et al., 2021), memory networks (Abdelrahman and Wang, 2019), and graph neural networks (Tong et al., 2020).
|
| 43 |
+
|
| 44 |
+
Exercise Generation. Previous exercise generation approaches for language learning primarily retrieve and manipulate text to create fixed types of exercises, such as gap fill and multiple-choice exercises (Agarwal and Mannem, 2011; Perez and Cuadros, 2017; Heck and Meurers, 2022), which are limited by the richness of the corpus. Besides them, some Question Generation (QG) approaches have been proposed for educational purposes (Zhao et al., 2022; Wang et al., 2021). While some of them allow for user control of certain question properties, they do not consider learners' individual and dynamic learning needs and progress. Thus, they cannot achieve the goal of adaptive learning. Recently, Srivastava and Goodman (2021) proposed an adaptive question generation model that connects question difficulty with student knowledge.
|
| 45 |
+
|
| 46 |
+
However, it neither models students' fine-grained knowledge states nor provides control over domain knowledge. Consequently, it is insufficient for practical use.
|
| 47 |
+
|
| 48 |
+
Controlled Text Generation (CTG) methods aim to steer text generation toward certain attributes. Existing CTG approaches can be broadly classified into three types: directly training a class-conditional language model (CCLM) (Keskar et al., 2019; Ziegler et al., 2019; Ficler and Goldberg, 2017), guiding a model via an attribute discriminator (Dathathri et al., 2020; Liu et al., 2020), or manipulating decoder's logits (also referred to as weighted decoding) (Holtzman et al., 2018; Yang and Klein, 2021). This study explores difficulty and lexical control in generating language learning exercises. Additionally, we seek to adapt the model's controllability to different users by building the dependency between control signals and individual states.
|
| 49 |
+
|
| 50 |
+
# 3 Problem Formalization
|
| 51 |
+
|
| 52 |
+
Let $\mathcal{H}_{\leq n} = \{(e_1,r_1),\ldots ,(e_n,r_n)\}$ be a student's learning history consisting of $n$ exercises and responses. Here, $e_i = \{w_{i,1},\dots,w_{i,|e_i|}\}$ is an exercise sentence for translation and $r_i\in \{0,1\}^{|e_i|}$ is the correctness label for each word in $e_i$ . We generate the next exercise $e_{n + 1}$ based on:
|
| 53 |
+
|
| 54 |
+
- $C_{n+1}$ : knowledge components that should be involved in $e_{n+1}$ . In language learning, we consider a word as a knowledge component, and therefore $C_{n+1} = \{c_1, \dots, c_{|C_{n+1}|} | c_* \in \mathcal{V}\}$ is a subset of vocabulary $\mathcal{V}$ that should be included in the output. In general, the knowledge components can be user or system defined based on the current learning material.
|
| 55 |
+
- $\mathbf{s}_{\mathbf{n} + 1}$ : a student's knowledge state for the knowledge components (the vocabulary) after $n$ interactions. $\mathbf{s}_{\mathbf{n} + 1}$ can be formalized as a $|\mathcal{V}|$ -dimensional vector with each entry between 0 and 1 indicating the mastery probability of that word.
|
| 56 |
+
- $d_{n+1}$ : the expected difficulty of $e_{n+1}$ . We use individual performance to estimate problem difficulty. For a particular student, the difficulty of an exercise is defined as the expected number of word errors the student would make in translating it.
|
| 57 |
+
|
| 58 |
+
Given the above setting, we formalize our task as:
|
| 59 |
+
|
| 60 |
+
$$
|
| 61 |
+
e _ {n + 1} = \underset {e} {\arg \max } P (e | \mathbf {s} _ {\mathbf {n} + \mathbf {1}}, d _ {n + 1}, C _ {n + 1}), \tag {2}
|
| 62 |
+
$$
|
| 63 |
+
|
| 64 |
+
where $e_{n + 1}$ satisfies the following constraints:
|
| 65 |
+
|
| 66 |
+
$$
|
| 67 |
+
\forall c \in C _ {n + 1}: \exists i,\; e _ {n + 1,\, i: i + | c |} = c, \tag {3}
|
| 68 |
+
$$
|
| 69 |
+
|
| 70 |
+
$$
|
| 71 |
+
d _ {n + 1} = \sum_ {w \in e _ {n + 1}} (1 - \mathbf {s} _ {\mathbf {n} + \mathbf {1}} [ w ]), \tag {4}
|
| 72 |
+
$$
|
| 73 |
+
|
| 74 |
+
corresponding to word constraint and difficulty constraint, respectively. Here, $\mathbf{s}_{\mathbf{n} + 1}[w]$ represents the correct probability of translating word $w$ ; therefore, the sum of $\{1 - \mathbf{s}[w] \mid w \in e\}$ is the expected number of errors in translating $e$ , which can be seen as a measure of the difficulty of $e$ .
|
| 75 |
+
|
| 76 |
+
Our task is distinct from previous CTG works in two aspects: 1) our control is dynamic; student states acting as control are also learnable; 2) there is a strong dependency among control signals (Eqs. 3 and 4), which is non-trivial to learn. Note that in this work, we measure difficulty via student performance and only consider vocabulary knowledge in defining $s$ for simplicity. Other definitions of sentence difficulty (e.g., definitions that incorporate other types of linguistic knowledge such as syntax) can be explored in future work.
|
| 77 |
+
|
| 78 |
+
# 4 Methodology
|
| 79 |
+
|
| 80 |
+
Our model is illustrated in Figure 2. We first employ a knowledge tracer $\mathcal{T}$ (§ 4.1) to estimate a student's time-varying knowledge states. Then, we build an LM-based exercise generator $\mathcal{G}$ (§ 4.2) to create exercises based on estimated states and specified difficulty and knowledge components (words). We jointly optimize the two modules with an inconsistency loss (§ 4.3) at training and apply a constrained decoding strategy (§ 4.4) at inference. Finally, we discuss how our model can accommodate personalized learning recommendation algorithms on the fly (§ 4.5).
|
| 81 |
+
|
| 82 |
+
# 4.1 Knowledge Tracing
|
| 83 |
+
|
| 84 |
+
The goal of our knowledge tracing model $\mathcal{T}$ is to estimate a student's latest knowledge state $\mathbf{s}_{\mathbf{n} + 1}$ given previous interactions $\mathcal{H}_{\leq n}$ . We adopt the deep knowledge tracing (DKT) model proposed by Piech et al. (2015). We concatenate past exercises as a word sequence $\mathbf{e}_{1:n} = \{w_{1,1},\dots,w_{n,|e_n|}\}$ and past responses as a label sequence $\mathbf{r}_{1:n} = \{r_{1,1},\dots,r_{n,|e_n|}\}$ , where $w_{i,j}$ and $r_{i,j}$ represent the $j$ th word or label of the $i$ th exercise. Then we
|
| 85 |
+
|
| 86 |
+

|
| 87 |
+
Figure 2: The framework of our proposed model. We estimate a student's latest knowledge state $\mathbf{s}_{\mathbf{n} + 1}$ from the learning history $\mathcal{H}_{\leq n}$ , and then combine it with user-specified difficulty $d_{n + 1}$ and knowledge components $C_{n + 1}$ to generate the next exercise $e_{n + 1}$ . The two modules are jointly trained with an inconsistency loss to penalize their disagreement.
|
| 88 |
+
|
| 89 |
+
convert the two sequences into word embeddings $\vec{\mathbf{e}}_{1:n}$ and label embeddings $\vec{\mathbf{r}}_{1:n}$ and send them to an LSTM encoder to predict the next state $\mathbf{s}_{\mathbf{n} + 1}$ :
|
| 90 |
+
|
| 91 |
+
$$
|
| 92 |
+
\mathbf {h} _ {\mathbf {n}} = \operatorname {L S T M} \left(\vec {\mathbf {e}} _ {n} + \vec {\mathbf {r}} _ {n}; \mathbf {h} _ {\mathbf {n} - 1}\right), \tag {5}
|
| 93 |
+
$$
|
| 94 |
+
|
| 95 |
+
$$
|
| 96 |
+
\mathbf {s} _ {\mathbf {n} + \mathbf {1}} = \operatorname {s i g m o i d} \left(\mathrm {W} _ {\mathrm {s}} * \mathbf {h} _ {\mathbf {n}} + \mathrm {b} _ {\mathrm {s}}\right). \tag {6}
|
| 97 |
+
$$
|
| 98 |
+
|
| 99 |
+
The model is trained to predict the binary word labels of the next exercise using the estimated knowledge state. The cross-entropy loss for a single student's history of $N$ interactions is computed as:
|
| 100 |
+
|
| 101 |
+
$$
|
| 102 |
+
\mathcal {L} _ {c e} = \sum_ {i = 1} ^ {N} \sum_ {j = 1} ^ {| e _ {i} |} \mathrm {C E} \left(r _ {i, j}, \mathbf {s} _ {i} \left[ w _ {i, j} \right]\right). \tag {7}
|
| 103 |
+
$$
|
| 104 |
+
|
| 105 |
+
We adopt the regularization strategy proposed by Yeung and Yeung (2018) to stabilize training:
|
| 106 |
+
|
| 107 |
+
$$
|
| 108 |
+
\mathcal {L} _ {r _ {\{1, 2 \}}} = \sum_ {n = 2} ^ {N} \sum_ {i = 1} ^ {| \mathcal {V} |} \left| \mathbf {s _ {n}} ^ {(i)} - \mathbf {s _ {n - 1}} ^ {(i)} \right| ^ {\{1, 2 \}}, \tag {8}
|
| 109 |
+
$$
|
| 110 |
+
|
| 111 |
+
where $\mathcal{L}_{r_1}$ ensures that only the states of relevant knowledge components are updated, and $\mathcal{L}_{r_2}$ penalizes the vibration. The final objective of $\mathcal{T}$ is $\mathcal{L}_{\mathcal{T}} = \mathcal{L}_{ce} + \lambda_1*\mathcal{L}_{r_1} + \lambda_2*\mathcal{L}_{r_2}$ with $\lambda$ for balance.
|
| 112 |
+
|
| 113 |
+
# 4.2 Controllable Exercise Generator
|
| 114 |
+
|
| 115 |
+
Our exercise generator $\mathcal{G}$ is fine-tuned from a pretrained LM. Specifically, we generate an exercise $e$ based on a student's current knowledge state s, target words $C$ , and expected difficulty $d$ (we drop the interaction index to reduce clutter). We parameterize the inputs as follows:
|
| 116 |
+
|
| 117 |
+
$$
|
| 118 |
+
\mathbf {x} = \left[ f _ {s} (\mathbf {s}); f _ {d} (d); E m b \left(c _ {1}, \dots , c _ {| C |}\right) \right], \tag {9}
|
| 119 |
+
$$
|
| 120 |
+
|
| 121 |
+
where knowledge state $s$ and scalar difficulty $d$ are projected to control vectors via two feedforward layers $f_{s}$ and $f_{d}$ , and $C$ are mapped to word embeddings. The training objective for generating a single exercise is defined as:
|
| 122 |
+
|
| 123 |
+
$$
|
| 124 |
+
\mathcal {L} _ {\mathcal {G}} = - \sum_ {t = 1} ^ {| e |} \log P \left(w _ {t} \mid w _ {1}, \dots , w _ {t - 1}, \mathbf {x}\right). \tag {10}
|
| 125 |
+
$$
|
| 126 |
+
|
| 127 |
+
During training, we sample a proportion of words from reference exercises as $C$ and calculate difficulty $d$ from ground-truth correctness labels, whereas states $s$ are estimated by $\mathcal{T}$ . At inference, $d$ and $C$ can be determined by instructors or the system, allowing automated and human intervention.
|
| 128 |
+
|
| 129 |
+
# 4.3 Joint Learning with Inconsistency Loss
|
| 130 |
+
|
| 131 |
+
We jointly optimize the knowledge tracer $\mathcal{T}$ and exercise generator $\mathcal{G}$ with an inconsistency loss inspired by Cui and Hu (2021), enabling the two modules to learn from each other. Concretely, after generating an exercise $e$ , we calculate its difficulty using input state s via Eq. 4, which should be as close to the input difficulty $d$ as possible:
|
| 132 |
+
|
| 133 |
+
$$
|
| 134 |
+
\mathcal {L} _ {i n c} = | d - \sum_ {w \in e} (1 - \mathbf {s} [ w ]) |. \tag {11}
|
| 135 |
+
$$
|
| 136 |
+
|
| 137 |
+
Since the second term is non-differentiable due to the argmax operation involved in producing $e$ , we replace it with "soft" tokens:
|
| 138 |
+
|
| 139 |
+
$$
|
| 140 |
+
\mathcal {L} _ {i n c} = | d - \sum_ {t = 1} ^ {| e |} (1 - \mathbf {p} _ {t} \odot \mathbf {s}) |, \tag {12}
|
| 141 |
+
$$
|
| 142 |
+
|
| 143 |
+
where $\mathbf{p}_t = \text{softmax}(\mathbf{o}_t / \tau)$ is the $t^{th}$ distribution normalized from its logits $\mathbf{o}_t \in \mathbb{R}^{|\mathcal{V}|}$ with a temperature parameter $\tau$ , and $\odot$ represents dot product.
|
| 144 |
+
|
| 145 |
+
For the generator $\mathcal{G}$ , this loss constrains the generation toward the target difficulty. For $\mathcal{T}$ , the LM distributions $p_{\theta}$ provide similarity information between vocabulary words. This is analogous to the relationship of knowledge components, which has been shown helpful in knowledge tracing (Tong et al., 2020). The final objective of our model is
|
| 146 |
+
|
| 147 |
+
$$
|
| 148 |
+
\mathcal {L} = \mathcal {L} _ {\mathcal {T}} + \gamma_ {1} \mathcal {L} _ {\mathcal {G}} + \gamma_ {2} \mathcal {L} _ {i n c}.
|
| 149 |
+
$$
|
| 150 |
+
|
| 151 |
+
# 4.4 Lexical Difficulty Constrained Decoding
|
| 152 |
+
|
| 153 |
+
We propose a beam search-based decoding algorithm to enforce the constraints introduced in § 3. At each step, we update the beam according to:
|
| 154 |
+
|
| 155 |
+
$$
|
| 156 |
+
Y _ {t} = \underset {\mathbf {y} _ {< t} \in Y _ {t - 1},\, y _ {t} \in \mathcal {V}} {\operatorname {a r g t o p k}} \; \log P (\mathbf {y} _ {\leq t} | \mathbf {x}) + \sum_ {F _ {i} \in \mathcal {F}} \alpha_ {i} F _ {i} (\mathbf {y} _ {\leq t}), \tag {13}
|
| 157 |
+
$$
|
| 158 |
+
|
| 159 |
+
where $Y_{t}$ is the set of decoded hypotheses in step $t$ and $k$ is the beam size. The first term is the standard objective of beam search and the second term is a weighted combination of additional scoring functions in terms of the satisfaction of different constraints. We formulate our constraints $\mathcal{F}$ in Eqs. 3 and 4 as:
|
| 160 |
+
|
| 161 |
+
$$
|
| 162 |
+
F _ {c} (\mathbf {y}) = \sum_ {c \in \mathcal {C}} I (c, \mathbf {y}), \quad \text {and} \quad F _ {d} (\mathbf {y}) = - | d - h (\mathbf {y}) |,
|
| 163 |
+
$$
|
| 164 |
+
|
| 165 |
+
corresponding to the satisfaction of word constraint and difficulty constraint, respectively. $I(c,\mathbf{y})$ is a Boolean predicate indicating whether word $c$ is included in sequence $\mathbf{y}$ and $h(\mathbf{y})$ calculates its difficulty via Eq. 4.
|
| 166 |
+
|
| 167 |
+
Succinctly, the decoding algorithm works in three steps. First, we expand the current $k$ hypotheses to $k \times |\mathcal{V}|$ candidates. Then, we prune the search space by dropping candidates that are not in the top- $k_{F}$ list of any scoring functions $F$ . Finally, we rescore the pruned candidates based on the full objective (Eq. 13) and select the $k$ -best ones to update the beam.
|
| 168 |
+
|
| 169 |
+
However, we found that greedily applying $F_{d}$ in the rescoring step would bias the decoder toward sequences with difficult words in the earlier steps. Drawing inspiration from Lu et al. (2022), we use lookahead heuristics that incorporate future estimates into the decoding process. Concretely, to score a subsequence $\mathbf{y}_{<t}$ , we first greedily decode the next $l + 1$ steps "soft" tokens (i.e., distributions): $\tilde{\mathbf{y}}_{t:t+l} = [\mathbf{p}_t, \dots, \mathbf{p}_{t+l}]$ . Then, we combine the constraint satisfaction of decoded $\mathbf{y}_{<t}$ and the estimated future $\tilde{\mathbf{y}}_{t:t+l}$ :
|
| 170 |
+
|
| 171 |
+
$$
|
| 172 |
+
\tilde {F} _ {c} (\mathbf {y} _ {< t}) = \sum_ {c \in \mathcal {C}} \max (I (c, \mathbf {y} _ {< t}), \max _ {j \in [ t, t + l ]} P (y _ {j} = c)),
|
| 173 |
+
$$
|
| 174 |
+
|
| 175 |
+
$$
|
| 176 |
+
\tilde {F} _ {d} (\mathbf {y} _ {< t}) = - \left| d - h (\mathbf {y} _ {< t}) - \sum_ {j = t} ^ {t + l} \left(1 - \mathbf {p} _ {j} \odot \mathbf {s}\right) \right|.
|
| 177 |
+
$$
|
| 178 |
+
|
| 179 |
+
The procedure of our decoding algorithm is in Appendix A.
|
| 180 |
+
|
| 181 |
+
# 4.5 Plug-and-Play Personalized Generation
|
| 182 |
+
|
| 183 |
+
Our model can be flexibly plugged into an existing personalized learning recommendation algorithm to automatically generate novel and customized exercises. We showcase this functionality using the EXPECTIMAX curriculum planning strategy derived from DKT. Given a student's current state $\mathbf{s}_{\mathbf{n}}$ , we can calculate the expected knowledge state after
|
| 184 |
+
|
| 185 |
+
<table><tr><td rowspan="2">Model</td><td colspan="2">Word-level</td><td colspan="2">Exercise-level</td></tr><tr><td>Seen</td><td>Unseen</td><td>Seen</td><td>Unseen</td></tr><tr><td>Ensemble</td><td>73.41</td><td>70.58</td><td>65.55</td><td>64.93</td></tr><tr><td>Standard DKT</td><td>80.46</td><td>75.54</td><td>72.32</td><td>71.54</td></tr><tr><td>DKTLM,τ=0.5</td><td>80.47</td><td>75.51</td><td>72.39</td><td>71.47</td></tr><tr><td>DKTLM,τ=1.0</td><td>80.49</td><td>75.54</td><td>72.38</td><td>71.49</td></tr><tr><td>DKTLM,τ=2.0</td><td>80.55</td><td>75.69</td><td>72.41</td><td>71.74</td></tr><tr><td>DKTLM,τ=3.0</td><td>80.54</td><td>75.48</td><td>72.33</td><td>71.52</td></tr><tr><td>DKTLM,τ=5.0</td><td>80.31</td><td>75.46</td><td>72.28</td><td>71.50</td></tr></table>
|
| 186 |
+
|
| 187 |
+
Table 1: AUC $(\times 100)$ performance of knowledge tracing models on seen and unseen text examples. Exercise-level results are obtained by averaging word-level predictions.
|
| 188 |
+
|
| 189 |
+
practicing a new exercise $e$ using our KT model $\mathcal{T}$ :
|
| 190 |
+
|
| 191 |
+
$$
|
| 192 |
+
\tilde {\mathbf {s}} _ {n + 1} = \sum_ {r \in \{0, 1 \} ^ {| e |}} P (r) * \mathcal {T} (\mathbf {s} _ {n}, (e, r)), \tag {14}
|
| 193 |
+
$$
|
| 194 |
+
|
| 195 |
+
where $\mathcal{T}(\cdot)$ computes the updated knowledge state given a new interaction $(e,r)$ . The probability of label sequence $r$ is computed from $\mathbf{s}_n$ assuming conditional independence $P(r) = \prod_{i=1}^{|e|} P(r_i)$ , where $P(r_i) = \mathbf{s}_n[e_i]$ . EXPECTIMAX scores $e$ based on how well it can improve a student's average knowledge state, i.e., $F_k(e) = \overline{\mathbf{s}}_{n+1} - \overline{\mathbf{s}}_n$ , where $\overline{\mathbf{s}}$ denotes the mean of the vector. We incorporate $F_k$ into the decoding objective (Eq. 13) and call it EXPECTIMAX-GEN.
|
| 196 |
+
|
| 197 |
+
In principle, our model can accommodate different recommendation algorithms with different ranking functions $F_{k}$ . The key benefit is that our model can generate novel exercises, while retrieval-based systems can only select exercises from an existing pool.
|
| 198 |
+
|
| 199 |
+
# 5 Experimental Results and Analysis
|
| 200 |
+
|
| 201 |
+
We experiment on the English track of Duolingo Second Language Acquisition Modeling (SLAM) dataset (Settles et al., 2018), which contains about 1 million interactions of 2.6k learners over the first 30 days of learning a second language. For each student, we use the first $80\%$ of interactions for training, and the subsequent and the last $10\%$ for validation and testing, respectively. Details of the dataset and experimental setup are in Appendix B.
|
| 202 |
+
|
| 203 |
+
We first evaluate the ability of the KT model to estimate student knowledge states in § 5.1. Then, we analyze the effectiveness of the exercise generator in § 5.2. Lastly, we showcase the superiority of our model in two educational scenarios with simulation experiments in § 5.3.
|
| 204 |
+
|
| 205 |
+
<table><tr><td rowspan="2">Models</td><td colspan="2">BLEU ↑</td><td colspan="2">METEOR ↑</td><td colspan="2">KC-Coverage (%) ↑</td><td colspan="2">D-MAE ↓</td><td rowspan="2">Invalid (%) ↓</td></tr><tr><td>Seen</td><td>Unseen</td><td>Seen</td><td>Unseen</td><td>Seen</td><td>Unseen</td><td>Seen</td><td>Unseen</td></tr><tr><td>EGH</td><td>9.23</td><td><0.01</td><td>18.79</td><td>6.05</td><td>14.26</td><td>2.49</td><td>0.396</td><td>1.500</td><td>0.071</td></tr><tr><td>AQGH+d</td><td>10.28</td><td><0.01</td><td>20.15</td><td>7.16</td><td>15.84</td><td>2.95</td><td>0.463</td><td>0.985</td><td>1.674</td></tr><tr><td>EGC</td><td>18.41</td><td>5.21</td><td>45.36</td><td>36.14</td><td>99.77</td><td>90.63</td><td>0.367</td><td>0.837</td><td>0.301</td></tr><tr><td>EGC+d</td><td>11.84</td><td>15.94</td><td>40.89</td><td>42.10</td><td>96.23</td><td>91.62</td><td>0.564</td><td>0.679</td><td>0.385</td></tr><tr><td>APEGs+C+d</td><td>22.47</td><td>34.60</td><td>56.15</td><td>44.01</td><td>99.61</td><td>95.71</td><td>0.246</td><td>0.604</td><td>0.283</td></tr><tr><td>-joint learning</td><td>22.01</td><td>33.15</td><td>55.80</td><td>42.85</td><td>99.63</td><td>94.08</td><td>0.251</td><td>0.619</td><td>0.281</td></tr><tr><td>-constrained decoding</td><td>21.58</td><td>32.06</td><td>55.43</td><td>40.49</td><td>99.59</td><td>94.77</td><td>0.263</td><td>0.681</td><td>0.277</td></tr><tr><td>Upper bound</td><td>53.65</td><td>41.24</td><td>74.97</td><td>52.10</td><td>99.75</td><td>95.96</td><td>0.060</td><td>0.302</td><td>0.233</td></tr></table>
|
| 206 |
+
|
| 207 |
+
Table 2: Results of exercise generation. APEG is our proposed model, and AQG is an adaptively difficulty-controlled question generation model proposed by Srivastava and Goodman (2021). The subscripts represent whether historical interactions $(\mathcal{H})$ , target words $(C)$ , difficulty $(d)$ , and student state $(s)$ are used to generate exercises.
|
| 208 |
+
|
| 209 |
+
# 5.1 Knowledge Tracing Evaluation
|
| 210 |
+
|
| 211 |
+
We use the standard AUC (ROC) as the metric of knowledge tracing in accordance with Settles et al. (2018). We denote our DKT model jointly trained with the LM-based exercise generator as $\mathrm{DKT_{LM}}$ and compare it with the following baselines: 1) Ensemble (Osika et al., 2018) which is one of the winning methods of the SLAM challenge that combines a RNN and a GBDT classifier. We reimplement this model to use texts only as input and remove other side features, such as response time. We do this because we are interested in its performance in a general setting where we do not assume the availability of diverse side information; 2) the standard DKT (Piech et al., 2015) which is trained only with the KT loss $\mathcal{L}_{\mathcal{T}}$ . We use it to verify whether jointly learning with an LM can help predict student language knowledge.
|
| 212 |
+
|
| 213 |
+
We present the results in Table 1, where we can see that DKT outperforms the Ensemble model when only text features are used, and our best model $\mathrm{DKT}_{\mathrm{LM},\tau = 2}$ outperforms DKT on all metrics. We hypothesize the performance gain comes from the word similarity information entailed in the output distributions $p_{\theta}$ of the LM. This can be regarded as the relationship between knowledge components, which is demonstrated effective in knowledge tracing (Tong et al., 2020). To verify this, we tune the temperature $\tau$ which controls the sparsity of output distributions: $\tau \rightarrow 0$ produces a sparse distribution that is too assertive and provides little relationship information, while $\tau \rightarrow \infty$ produces a uniform distribution where all words are evenly related. The results in the second section of Table 1 suggest that a medium $\tau$ improves the performance, while a small $(\tau = 1)$ or large $(\tau = 5)$ is harmful, particularly for predicting unseen data.
|
| 214 |
+
|
| 215 |
+
The broader message from this observation is that the knowledge encoded in pre-trained LMs has the potential to improve knowledge tracing in the domain of language learning. We also conduct an analysis of the influence of regularization terms Eq. 8, detailed in Appendix C.
|
| 216 |
+
|
| 217 |
+
# 5.2 Exercise Generation Evaluation
|
| 218 |
+
|
| 219 |
+
The main results of exercise generation are presented in Table 2, which are split according to whether the exercises are seen in the training set. Evaluation metrics include reference-based BLEU (Papineni et al., 2002) and METEOR (Banerjee and Lavie, 2005), KC-Coverage which is the percentage of target knowledge components (words) that appear in the outputs, D-MAE which is the mean absolute error between the input difficulty and output difficulty, Invalid which is the percentage of exercises that have grammar errors detected using an automatic tool<sup>3</sup>. Since we generate exercises for language learning, we expect a valid exercise to be grammatically correct. We analyze the performance from the following aspects.
|
| 220 |
+
|
| 221 |
+
Lexical Controllability. We first examine the lexical controllability of our model, which is crucial for generating personalized exercises for language learning. We compare our model with two baselines: 1) $\mathrm{EG}_{\mathcal{H}}$ which generates the next exercise based on the student's historical interactions; and 2) $\mathrm{AQG}_{\mathcal{H} + d}$ which generates the next exercise based on historical interactions and a target difficulty. The two baselines perform poorly on BLEU, METEOR, and KC-Coverage metrics, particularly
|
| 222 |
+
|
| 223 |
+
<table><tr><td></td><td>BLEU ↑</td><td>Coverage (%) ↑</td><td>D-MAE ↓</td></tr><tr><td>w/o lookahead</td><td>20.46</td><td>99.18</td><td>0.263</td></tr><tr><td>w/ lookahead</td><td>21.20</td><td>99.30</td><td>0.257</td></tr></table>
|
| 224 |
+
|
| 225 |
+
Table 3: Comparison of generation performance with and without lookahead on the validation set.
|
| 226 |
+
|
| 227 |
+
for unseen data. This indicates that they cannot predict the accurate content of the next exercise based on historical data or difficulty information, possibly because there is no strong connection within a sequence of exercises or such connection cannot be captured by an LM. We note that $\mathrm{EG}_{\mathcal{H}}$ performs well on the validness metric. However, upon inspecting its results, we found the model almost only copies exercises from history, with less than $0.02\%$ novel generations. The same issue is observed in $\mathrm{AQG}_{\mathcal{H} + d}$ where more than $90\%$ exercises are repetitive. We follow Srivastava and Goodman (2021) to improve its novelty using a repetition penalty during the generation, but this results in far more invalid exercises $(1.7\%)$ . In comparison, our model achieves a better balance between generalization ability and fluency.
|
| 228 |
+
|
| 229 |
+
Effect of Student Modeling. To investigate whether student modeling helps exercise generation, we build two baselines without student knowledge states: 1) $\mathrm{EG}_C$ which conditions generation on target KCs (words) only, and 2) $\mathrm{EG}_{C + d}$ on both target words and difficulty. The former variant can be considered a keyword-to-text generation model, while the latter imposes additional difficulty control. Our full model $\mathrm{APEG_{s + C + d}}$ significantly outperforms both of them, which proves our aforementioned hypothesis that a student's dynamic knowledge states must be considered in generating adaptive and personalized exercises. An interesting observation is that incorporating difficulty control improves the performance on unseen data, indicating the model to some degree learns generalizable difficulty information. Nevertheless, our further analysis shows the model is not adaptive to students of different abilities, which will be discussed in § 5.3.
|
| 230 |
+
|
| 231 |
+
Ablation Study. The key challenge of our task is to learn the dependency between student knowledge, vocabulary, and exercise difficulty (Eqs. 3 and 4). To understand which parts of our model contribute to this goal, we build two ablated variants by removing the joint learning strategy (§ 4.3) and the constrained decoding algorithm (§ 4.4), re
|
| 232 |
+
|
| 233 |
+

|
| 234 |
+
Figure 3: Distributions of accumulated word difficulty in four equally sized segments of 2000 sampled exercise sentences.
|
| 235 |
+
|
| 236 |
+
spectively. As shown in the second section of Table 2, the search-based method is slightly better than the learning-based method, while combining them leads to the best performance.
|
| 237 |
+
|
| 238 |
+
We further explore the effect of the lookahead strategy on difficulty constraints. Table 3 presents the ablation results on the validation set, where we can see lookahead strategy improves both generation quality and controllability. To understand how it works, we measure the distribution of difficulty in different regions of exercise sentences. Such distribution is computed as the accumulated word difficulty in four equally sized segments of 2000 sampled sentences. As shown in Figure 3, the difficult words of reference exercises are largely concentrated in the $2^{nd}$ and $4^{th}$ quarter. Our decoding algorithm with lookahead produces a similar result, while removing lookahead would bias the distribution toward $2^{nd}$ and $3^{rd}$ quarter. This confirms our assumption that naively applying $F_{d}$ would greedily select difficult words in the early steps, which is not the distribution of reference exercises. Our decoding algorithm avoids this issue by estimating the future and therefore achieves better results.
|
| 239 |
+
|
| 240 |
+
Upper Bound Analysis. When we train our model, we use ground-truth difficulty $d$ and target words $C$ obtained from references; however, the student states $s$ are estimated from the KT model. We conduct an upper bound analysis to understand the influence of the accuracy of $s$ on the generation performance. Since a student's actual mastery of every vocabulary word is not available, we choose to replace the ground-truth difficulty levels $d$ with those estimated from $s$ . As shown in the last section of Table 2, all metrics are considerably boosted when the inconsistency between states $s$ and difficulty $d$ is eliminated. This again proves the effect
|
| 241 |
+
|
| 242 |
+

|
| 243 |
+
Figure 4: Generating 50 additional exercises of specified difficulty levels for different student groups using APEG $s + C + d$ (adaptive) and non-adaptive $\mathrm{EG}_{C + d}$ models. The Y-axis is the ratio of output difficulty $d_{out}$ to input difficulty $d_{in}$ ; the closer to 1 (dotted line) the better. Solid lines are averaged results of group students at each step, and shadows represent standard deviations.
|
| 244 |
+
|
| 245 |
+

|
| 246 |
+
|
| 247 |
+

|
| 248 |
+
|
| 249 |
+
<table><tr><td>d_in</td><td>Target words</td><td colspan="4">Generated exercises</td><td>d_out</td></tr><tr><td colspan="7">Avg. knowledge state s = 0.32</td></tr><tr><td>1.0</td><td>{men}</td><td colspan="4">Fifteen men.</td><td>1.25</td></tr><tr><td>2.0</td><td>{study}</td><td colspan="4">I study English.</td><td>2.18</td></tr><tr><td>3.0</td><td>{airport}</td><td colspan="4">Where is the airport?</td><td>2.73</td></tr><tr><td colspan="7">Avg. knowledge state s = 0.65</td></tr><tr><td>1.0</td><td>{profile}</td><td colspan="4">He has a famous profile.</td><td>0.94</td></tr><tr><td>2.0</td><td>{white, bitter}</td><td colspan="4">The white mushroom is bitter.</td><td>1.75</td></tr><tr><td>3.0</td><td>{hit, nail}</td><td colspan="4">She hit the nail on the head.</td><td>2.89</td></tr></table>
|
| 250 |
+
|
| 251 |
+
Table 4: Examples of exercises based on different controls. $d_{in}$ is the input difficulty while $d_{out}$ is the output difficulty estimated by our knowledge tracing model. The degree of highlight represents a student's mastery of vocabulary words (the darker the harder).
|
| 252 |
+
|
| 253 |
+
of incorporating student states and explains how such information comes to play: the knowledge states explicitly convey the dynamics between control signals $d$ , $\mathcal{C}$ , and target exercises $e$ , which is non-trivial to learn by the model itself.
|
| 254 |
+
|
| 255 |
+
Case Study. We provide a few cases in Table 4. We can see our model can dynamically adjust the exercise content according to specified words, target difficulty, as well as students' different mastery states of the vocabulary. The exercises generated for advanced students (avg. state $= 0.65$ ) are generally more difficult than for poor students (avg. state $= 0.32$ ) under the same input difficulty.
|
| 256 |
+
|
| 257 |
+
# 5.3 Educational Applications
|
| 258 |
+
|
| 259 |
+
In this subsection, we showcase the potential applications of our model in two educational scenarios with simulation experiments.
|
| 260 |
+
|
| 261 |
+
# 5.3.1 Adaptive Difficulty Calibration
|
| 262 |
+
|
| 263 |
+
A crucial requirement for adaptive learning systems is to dynamically adjust the difficulty of learning items to match each student's learning
|
| 264 |
+
|
| 265 |
+
progress (Becker et al., 2018). However, previous difficulty-controlled question generation approaches are mainly based on inherent problem difficulty, independent of individual abilities (Susanti et al., 2017; Kumar et al., 2019). Ideally, our model can achieve this goal by learning the dependency between difficulty and student knowledge states. To verify this, we generate 50 additional exercises of specified difficulties for each student after their existing interactions. At each step, we construct input by sampling a target word from the vocabulary and a difficulty level from a uniform distribution [1, 3]. We compare our full model $\mathrm{APEG}_{s + C + d}$ with its variant $\mathrm{EG}_{C + d}$ which achieves the best difficulty controllability for unseen data. This baseline can be considered a vanilla non-adaptive difficulty-controlled exercise generation model.
|
| 266 |
+
|
| 267 |
+
In this simulation, we are interested in whether the difficulty controllability of our model can adapt to students of various knowledge levels. To this end, we rank students based on their average knowledge states $\overline{\mathbf{s}}$ and split the result accordingly. As shown in Figure 4, the difficulty controllability of the baseline is not reliable across different groups. In particular, it tends to generate harder (up to $2 \times d_{in}$ ) exercises for the bottom 10 percentile students but easier (up to $\frac{1}{2} \times d_{in}$ ) ones for the top 10 percentile students, although it performs well for the intermediate 80 percentile students. In comparison, our adaptive model is also slightly biased toward the intermediate group but much more consistent than the baseline, with less than $20\%$ fluctuations on average. Besides, we can see from the shadows that the baseline experiences huge variances at each step, indicating it is not adaptive to different knowledge states, even though the students within a group are at a similar level.
|
| 268 |
+
|
| 269 |
+

|
| 270 |
+
Figure 5: Simulation results over 30 exercises. The X-axis is the number of exercises, and the Y-axis is students' average predicted knowledge state $\overline{\mathbf{s}}$ indicating a student's overall mastery of the vocabulary.
|
| 271 |
+
|
| 272 |
+
# 5.3.2 Improving Learning Efficiency
|
| 273 |
+
|
| 274 |
+
We now examine whether our model can be used to improve student learning efficiency by personalizing exercise sequences. To this end, we customize 30 continuous exercises for 50 sampled students using our proposed EXPECTIMAX-GEN (§ 4.5) and the original EXPECTIMAX. Both of them aim to maximize the expected knowledge state of the next step $\overline{\mathbf{s}}_{n + 1}$ . For the former, at each step, we first find the best single word that can maximize $\overline{\mathbf{s}}_{n + 1}$ and then generate the next exercise based on the selected word and a fixed difficulty of 1. For the latter, we directly select the best exercise from the pool. We update students' knowledge states after each practice and repeat this process until we collect 30 exercises. We compare the change in $\overline{\mathbf{s}}$ to measure which strategy is more efficient in improving students' knowledge.
|
| 275 |
+
|
| 276 |
+
The simulation results are shown in Figure 5. We also include a randomly selected exercise sequence as a lower bound, which turns out to harm student learning most of the time. The decrease in knowledge state is possibly caused by overly difficult exercises which would lead to wrong answers and reduce the predicted probability. Under the same practice opportunities, exercises generated by EXPECTIMAX-GEN lead to faster knowledge growth than those selected by EXPECTIMAX. Upon further inspection, we found about $70\%$ of them are unseen in the corpus. This explains the efficiency of EXPECTIMAX-GEN as it can create novel exercises targeting individual needs on the fly while EXPECTIMAX is limited by the pool.
|
| 277 |
+
|
| 278 |
+
# 5.3.3 Qualitative Discussions on Simulation
|
| 279 |
+
|
| 280 |
+
Our simulations are based on the DKT model. We note that some previous studies have observed inconsistencies between DKT behaviors and the human learning process (Shen et al., 2021). Thus, we adopt a simple regularization approach (Eqs. 5 and 6) to alleviate such inconsistencies (Yeung and Yeung, 2018), which we found can reduce the variance of simulation results and improve KT performance (Appendix C).
|
| 281 |
+
|
| 282 |
+
A popular argument regarding the relationship between the difficulty of learning content and student outcomes is that the level of difficulty should be set just above the learner's current knowledge, i.e., $d \approx 0.5$ (Settles and Meeder, 2016; Gallego-Durán et al., 2018). During the simulations, we found EXPECTIMAX does not follow this heuristic but tends to generate relatively easy exercises ( $d < 0.3$ mostly) repeatedly using certain words, consistent with the finding in Tschiatschek et al. (2022). One possible reason is that easier exercises are more likely to produce correct answers, which in turn increases the averaged predicted probability of DKT (i.e., estimated knowledge state).
|
| 283 |
+
|
| 284 |
+
Nevertheless, the above observations do not influence our conclusion as the superiority of our model comes from its ability to adapt to students' knowledge (§ 5.3.1) and generate customized exercises targeting individual needs (§ 5.3.2), independent of the simulation policy.
|
| 285 |
+
|
| 286 |
+
# 6 Conclusion
|
| 287 |
+
|
| 288 |
+
We propose an adaptive and personalized exercise generation model combining recent advances in knowledge tracing and controllable generation using pre-trained LMs. Our approach works by learning the dynamics between exercise difficulty and student vocabulary knowledge in the domain of language learning. Experimental results on real-world language learning data from Duolingo demonstrate that our model can generate adaptive and personalized exercises needed in an educational setting. We further showcase our model's applicability in education with simulation studies.
|
| 289 |
+
|
| 290 |
+
# Ethics Statement
|
| 291 |
+
|
| 292 |
+
The learner data used in this study are anonymized by Settles et al. (2018) and, to the best of our knowledge, do not contain sensitive information. We foresee no further ethical or privacy concerns with the work.
|
| 293 |
+
|
| 294 |
+
# Limitations
|
| 295 |
+
|
| 296 |
+
We state the limitations of this work from the following aspects. First, we make an initial assumption about the dynamics between exercise difficulty, vocabulary, and student knowledge. While we believe our assumption is sensible in the domain of language learning, we acknowledge that we make some simplifications for the ease of modeling. For example, we measure difficulty using individual performance, whereas a better way could be combining it with inherent problem difficulty, e.g., text complexity. Besides, we only consider vocabulary mastery in defining student knowledge and predicting their performance. Exploring more dimensions of language knowledge (e.g., syntax) might lead to a finer-grained personalization. Second, our model relies on student learning logs to estimate their real-time knowledge states. This model might face the cold start problem when dealing with insufficient history. Though it is beyond the scope of this study, techniques like computerized adaptive testing can be used to combat this problem. Lastly, due to the lack of a real learning environment, we discuss the educational promise of our model with simulation experiments. In the future, a user study can be incorporated to validate our conclusions.
|
| 297 |
+
|
| 298 |
+
# References
|
| 299 |
+
|
| 300 |
+
Ghodai Abdelrahman and Qing Wang. 2019. Knowledge tracing with sequential key-value memory networks. In Proceedings of the 42nd International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 175-184.
|
| 301 |
+
Manish Agarwal and Prashanth Mannem. 2011. Automatic gap-fill question generation from text books. In Proceedings of the sixth workshop on innovative use of NLP for building educational applications, pages 56-64.
|
| 302 |
+
Allison Bailey, Nithya Vaduganathan, Tyce Henry, Renee Laverdiere, and Lou Pugliese. 2018. Making digital learning work: Success strategies from six leading universities and community colleges. Boston: Massachusetts: Boston Consulting Group.
|
| 303 |
+
Satanjeev Banerjee and Alon Lavie. 2005. Meteor: An automatic metric for mt evaluation with improved correlation with human judgments. In Proceedings of the acl workshop on intrinsic and extrinsic evaluation measures for machine translation and/or summarization, pages 65-72.
|
| 304 |
+
Samantha Adams Becker, Malcolm Brown, Eden Dahlstrom, Annie Davis, Kristi DePaul, Veronica Diaz, and Jeffrey Pomerantz. 2018. Horizon report
|
| 305 |
+
|
| 306 |
+
2018 higher education edition brought to you by educause. Technical report, EDUCAUSE.
|
| 307 |
+
Hao Cen, Kenneth Koedinger, and Brian Junker. 2008. Comparing two irt models for conjunctive skills. In International Conference on Intelligent Tutoring Systems, pages 796-798. Springer.
|
| 308 |
+
Albert T Corbett and John R Anderson. 1994. Knowledge tracing: Modeling the acquisition of procedural knowledge. User modeling and user-adapted interaction, 4(4):253-278.
|
| 309 |
+
Peng Cui and Le Hu. 2021. Topic-guided abstractive multi-document summarization. In *Findings of the Association for Computational Linguistics: EMNLP* 2021, pages 1463-1472, Punta Cana, Dominican Republic. Association for Computational Linguistics.
|
| 310 |
+
Jennifer B Daines, Tonya Troka, and John M Santiago. 2016. Improving performance in trigonometry and pre-calculus by incorporating adaptive learning technology into blended models on campus. In 2016 ASEE Annual Conference & Exposition.
|
| 311 |
+
Sumanth Dathathri, Andrea Madotto, Janice Lan, Jane Hung, Eric Frank, Piero Molino, Jason Yosinski, and Rosanne Liu. 2020. Plug and play language models: A simple approach to controlled text generation. In International Conference on Learning Representations.
|
| 312 |
+
Jessica Ficler and Yoav Goldberg. 2017. Controlling linguistic style aspects in neural language generation. In Proceedings of the Workshop on Stylistic Variation, pages 94-104, Copenhagen, Denmark. Association for Computational Linguistics.
|
| 313 |
+
Francisco J Gallego-Durán, Rafael Molina-Carmona, and Faraón Llorens-Largo. 2018. Measuring the difficulty of activities for adaptive learning. Universal access in the information society, 17:335-348.
|
| 314 |
+
Tanja Heck and Detmar Meurers. 2022. Parametrizable exercise generation from authentic texts: Effectively targeting the language means on the curriculum. In Proceedings of the 17th Workshop on Innovative Use of NLP for Building Educational Applications (BEA 2022), pages 154-166.
|
| 315 |
+
Matthias Holthaus, Tansu Pancar, and Per Bergamin. 2019. Recommendation acceptance in a simple adaptive learning system.
|
| 316 |
+
Ari Holtzman, Jan Buys, Maxwell Forbes, Antoine Bosselut, David Golub, and Yejin Choi. 2018. Learning to write with cooperative discriminators. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1638-1649, Melbourne, Australia. Association for Computational Linguistics.
|
| 317 |
+
Shuyan Huang, Qiongqiong Liu, Jiahao Chen, Xiangen Hu, Zitao Liu, and Weiqi Luo. 2022. A design of a simple yet effective exercise recommendation system in k-12 online learning. In International Conference
|
| 318 |
+
|
| 319 |
+
on Artificial Intelligence in Education, pages 208-212. Springer.
|
| 320 |
+
Christof Imhof, Per Bergamin, and Stéphanie McGarrity. 2020. Implementation of adaptive learning systems: Current state and potential. Online teaching and learning in higher education, pages 93-115.
|
| 321 |
+
Tanja Käser, Severin Klingler, Alexander G Schwing, and Markus Gross. 2017. Dynamic bayesian networks for student modeling. IEEE Transactions on Learning Technologies, 10(4):450-462.
|
| 322 |
+
Nitish Shirish Keskar, Bryan McCann, Lav R Varshney, Caiming Xiong, and Richard Socher. 2019. Ctrl: A conditional transformer language model for controllable generation. arXiv preprint arXiv:1909.05858.
|
| 323 |
+
Vishwajeet Kumar, Yuncheng Hua, Ganesh Ramakrishnan, Guilin Qi, Lianli Gao, and Yuan-Fang Li. 2019. Difficulty-controllable multi-hop question generation from knowledge graphs. In International Semantic Web Conference, pages 382-398. Springer.
|
| 324 |
+
Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7871-7880.
|
| 325 |
+
Ruibo Liu, Guangxuan Xu, Chenyan Jia, Weicheng Ma, Lili Wang, and Soroush Vosoughi. 2020. Data boost: Text data augmentation through reinforcement learning guided conditional generation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 9031-9041, Online. Association for Computational Linguistics.
|
| 326 |
+
Ximing Lu, Sean Welleck, Peter West, Liwei Jiang, Jungo Kasai, Daniel Khashabi, Ronan Le Bras, Lianhui Qin, Youngjae Yu, Rowan Zellers, Noah A. Smith, and Yejin Choi. 2022. NeuroLogic a*esque decoding: Constrained text generation with lookahead heuristics. In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 780-799, Seattle, United States. Association for Computational Linguistics.
|
| 327 |
+
Anton Osika, Susanna Nilsson, Andrii Sydorchuk, Faruk Sahin, and Anders Huss. 2018. Second language acquisition modeling: An ensemble approach. In Proceedings of the Thirteenth Workshop on Innovative Use of NLP for Building Educational Applications, pages 217-222, New Orleans, Louisiana. Association for Computational Linguistics.
|
| 328 |
+
Shalini Pandey and George Karypis. 2019. A self-attentive model for knowledge tracing. In Proceedings of the 12th International Conference on Educational Data Mining, EDM 2019, Montréal, Canada, July
|
| 329 |
+
|
| 330 |
+
2-5, 2019. International Educational Data Mining Society (IEDMS).
|
| 331 |
+
Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th annual meeting of the Association for Computational Linguistics, pages 311-318.
|
| 332 |
+
Naiara Perez and Montse Cuadros. 2017. Multilingual call framework for automatic language exercise generation from free text. In Proceedings of the Software Demonstrations of the 15th Conference of the European Chapter of the Association for Computational Linguistics, pages 49-52.
|
| 333 |
+
Chris Piech, Jonathan Bassen, Jonathan Huang, Surya Ganguli, Mehran Sahami, Leonidas J Guibas, and Jascha Sohl-Dickstein. 2015. Deep knowledge tracing. Advances in neural information processing systems, 28.
|
| 334 |
+
Oleksandr Polozov, Eleanor O'Rourke, Adam M Smith, Luke Zettlemoyer, Sumit Gulwani, and Zoran Popovic. 2015. Personalized mathematical word problem generation. In Twenty-Fourth International Joint Conference on Artificial Intelligence.
|
| 335 |
+
Burr Settles, Chris Brust, Erin Gustafson, Masato Hagiwara, and Nitin Madnani. 2018. Second language acquisition modeling. In Proceedings of the Thirteenth Workshop on Innovative Use of NLP for Building Educational Applications, pages 56-65, New Orleans, Louisiana. Association for Computational Linguistics.
|
| 336 |
+
Burr Settles and Brendan Meeder. 2016. A trainable spaced repetition model for language learning. In Proceedings of the 54th annual meeting of the association for computational linguistics (volume 1: Long papers), pages 1848-1858.
|
| 337 |
+
Shuanghong Shen, Qi Liu, Enhong Chen, Zhenya Huang, Wei Huang, Yu Yin, Yu Su, and Shijin Wang. 2021. Learning process-consistent knowledge tracing. In Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery & Data Mining, KDD '21, page 1452-1460, New York, NY, USA. Association for Computing Machinery.
|
| 338 |
+
Dongmin Shin, Yugeun Shim, Hangyeol Yu, Seewoo Lee, Byungsoo Kim, and Youngduck Choi. 2021. Saint+: Integrating temporal features for ednet correctness prediction. In LAK21: 11th International Learning Analytics and Knowledge Conference, pages 490-496.
|
| 339 |
+
Megha Srivastava and Noah Goodman. 2021. Question generation for adaptive education. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 2: Short Papers), pages 692-701, Online. Association for Computational Linguistics.
|
| 340 |
+
|
| 341 |
+
Yuni Susanti, Takenobu Tokunaga, Hitoshi Nishikawa, and Hiroyuki Obari. 2017. Controlling item difficulty for automatic vocabulary question generation. Research and practice in technology enhanced learning, 12(1):1-16.
|
| 342 |
+
Shiwei Tong, Qi Liu, Wei Huang, Zhenya Huang, Enhong Chen, Chuanren Liu, Haiping Ma, and Shijin Wang. 2020. Structure-based knowledge tracing: an influence propagation view. In 2020 IEEE International Conference on Data Mining (ICDM), pages 541-550. IEEE.
|
| 343 |
+
Sebastian Tschiatschek, Maria Knobelsdorf, and Adish Singla. 2022. Equity and fairness of bayesian knowledge tracing. arXiv preprint arXiv:2205.02333.
|
| 344 |
+
Vija Vagale and Laila Niedrite. 2012. Learner model's utilization in the e-learning environments. In DB&Local Proceedings, pages 162-174. CiteSeer.
|
| 345 |
+
Zichao Wang, Andrew Lan, and Richard Baraniuk. 2021. Math word problem generation with mathematical consistency and problem context constraints. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 5986-5999, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
|
| 346 |
+
Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, et al. 2020. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 conference on empirical methods in natural language processing: system demonstrations, pages 38-45.
|
| 347 |
+
Zhengyang Wu, Ming Li, Yong Tang, and Qingyu Liang. 2020. Exercise recommendation based on knowledge concept prediction. Knowledge-Based Systems, 210:106481.
|
| 348 |
+
Kevin Yang and Dan Klein. 2021. FUDGE: Controlled text generation with future discriminators. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 3511-3535, Online. Association for Computational Linguistics.
|
| 349 |
+
Louise Yarnall, Barbara Means, and Tallie Wetzel. 2016. Lessons learned from early implementations of adaptive courseware.
|
| 350 |
+
Chun-Kit Yeung and Dit-Yan Yeung. 2018. Addressing two problems in deep knowledge tracing via prediction-consistent regularization. In Proceedings of the Fifth Annual ACM Conference on Learning at Scale, pages 1-10.
|
| 351 |
+
Michael V Yudelson, Kenneth R Koedinger, and Geoffrey J Gordon. 2013. Individualized bayesian knowledge tracing models. In Artificial Intelligence in Education: 16th International Conference, AIED 2013, Memphis, TN, USA, July 9-13, 2013. Proceedings 16, pages 171-180. Springer.
|
| 352 |
+
|
| 353 |
+
Zhenjie Zhao, Yufang Hou, Dakuo Wang, Mo Yu, Chengzhong Liu, and Xiaojuan Ma. 2022. Educational question generation of children storybooks via question type distribution learning and event-centric summarization. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 5073-5085.
|
| 354 |
+
Qingyu Zhou and Danqing Huang. 2019. Towards generating math word problems from equations and topics. In Proceedings of the 12th International Conference on Natural Language Generation, pages 494-503, Tokyo, Japan. Association for Computational Linguistics.
|
| 355 |
+
Daniel M Ziegler, Nisan Stiennon, Jeffrey Wu, Tom B Brown, Alec Radford, Dario Amodei, Paul Christiano, and Geoffrey Irving. 2019. Fine-tuning language models from human preferences. arXiv preprint arXiv:1909.08593.
|
| 356 |
+
|
| 357 |
+
# A Decoding Algorithm
|
| 358 |
+
|
| 359 |
+
Algorithm 1 Pseudo-code for our Lexical Difficulty Constrained Decoding
|
| 360 |
+
Input: Target words $\mathcal{C}$ , difficulty $d$ , a collection of score functions $\mathcal{F}$ and their weights $\alpha$ , max step $T$ , beam size $k$
|
| 361 |
+
Output: $k$ hypotheses $Y_{T}$ in the last step
|
| 362 |
+
1: $Y_{0}\gets \mathrm{InitBeam}()\qquad \triangleright \{\langle BOS\rangle \}$
|
| 363 |
+
2: for $t = 1,t\le T,t + +$ do
|
| 364 |
+
3: $Y_{t}\gets \emptyset$
|
| 365 |
+
4: Candidates $\leftarrow \mathrm{Generate}(Y_{t - 1},1)\qquad \triangleright$ expand
|
| 366 |
+
5: for $F\in \mathcal{F}$ do $\qquad \triangleright$ prune candidates
|
| 367 |
+
6: $Y_{t}\gets Y_{t}\cup \underset {\mathbf{y}_{\le t}\in \mathrm{Candidates}}{\operatorname {argtopk}}F(\mathbf{y}_{\le t})$
|
| 368 |
+
7: end for
|
| 369 |
+
8: for $\mathbf{y}_{\le t}\in Y_t$ do $\qquad \triangleright$ generate $l$ -step lookaheads
|
| 370 |
+
9: $\tilde{\mathbf{y}}_{t + 1:t + l} = \mathrm{Generate}(\mathbf{y}_{\le t},l)$
|
| 371 |
+
10: end for
|
| 372 |
+
11: $Y_{t}\gets \underset {\mathbf{y}_{\le t}\in Y_{t}}{\operatorname {argtopk}}\sum_{F_{i}\in \mathcal{F}}\alpha_{i}F_{i}(\mathbf{y}_{\le t}\circ \tilde{\mathbf{y}}_{t + 1:t + l})$
|
| 373 |
+
12: end for
|
| 374 |
+
13: return $Y_{T}$
|
| 375 |
+
|
| 376 |
+
# B Experimental Setup
|
| 377 |
+
|
| 378 |
+
# B.1 Dataset Details
|
| 379 |
+
|
| 380 |
+
The statistics of our dataset are summarized in Table 5. Each interaction records a target sentence, per-token correctness labels of the student's response, and meta information such as user nationality and response time. We group interactions by user_id (anonymous) in temporal order to obtain per-student interaction sequences. Refer to Settles et al. (2018) for more descriptions of the dataset.
|
| 381 |
+
|
| 382 |
+
<table><tr><td rowspan="2">Statistics</td><td colspan="3">Split</td></tr><tr><td>Train</td><td>Dev</td><td>Test</td></tr><tr><td># of students</td><td>2,593</td><td>2,593</td><td>2,593</td></tr><tr><td># of interactions</td><td>824,012</td><td>115,770</td><td>114,586</td></tr><tr><td># of questions</td><td>7,780</td><td>5,524</td><td>5,847</td></tr><tr><td># of words (KCs)</td><td>1,967</td><td>1,839</td><td>1,879</td></tr></table>
|
| 383 |
+
|
| 384 |
+
# B.2 Implementation Details
|
| 385 |
+
|
| 386 |
+
We implement our models using the Transformers library (Wolf et al., 2020). Our knowledge tracing model is a three-layer LSTM with a hidden size of 100. We train it for 10 epochs with the regularization weights $\lambda_1 = 0.5, \lambda_2 = 0.1$ , selected on the validation set. For the exercise generator, we fine-tune a pre-trained BART-base
|
| 387 |
+
|
| 388 |
+
(Lewis et al., 2020) for up to 10 epochs. An early stop strategy is applied when the loss on the validation set does not decrease for three continuous epochs. We first train the DKT and exercise generator separately until both of them converge. Then, we jointly optimize the two models with hyperparameters: $\gamma_{1} = 1, \gamma_{2} = 0.8, \tau = 2$ . During generation, we set the beam size to 4. The weights $\alpha$ for word and difficulty constraints are set to 0.1 and 0.5 as the word constraint is easy to achieve in our experiments. We use Nvidia Tesla A100 with 40 GB of GPU memory for training and inference. On a single GPU, one training epoch of the exercise generator takes about 30 minutes, and that of DKT takes about 7 minutes when they are separately trained. Joint training takes a longer time, about an hour for one epoch. We report the average results over three runs.
|
| 389 |
+
|
| 390 |
+
# C Influence of Regularization in KT
|
| 391 |
+
|
| 392 |
+
To inspect the influence of regularization terms (Eq. 8) on the KT performance, we conduct a grid search for $\lambda_{1}$ and $\lambda_{2}$ on the validation set. As can be seen from Table 6 and Table 7, $\mathcal{L}_{r_1}$ consistently improves exercise-level performance at the cost of sacrificing word-level performance, whereas $\mathcal{L}_{r_2}$ with a suitable weight $(\lambda_{2} = 0.3)$ can improve both in most cases. This suggests the students' knowledge states transit gradually over time. We choose $\lambda_{1} = 0.5$ , $\lambda_{2} = 0.1$ for the best balance.
|
| 393 |
+
|
| 394 |
+
Table 5: The statistics of SLAM English track.
|
| 395 |
+
|
| 396 |
+
<table><tr><td>AUC\(\lambda_2\) \(\lambda_1\)</td><td>0.0</td><td>0.1</td><td>0.3</td><td>0.5</td></tr><tr><td>0.0</td><td>79.51</td><td>79.50</td><td>79.57</td><td>79.53</td></tr><tr><td>0.1</td><td>79.44</td><td>79.45</td><td>79.49</td><td>79.52</td></tr><tr><td>0.3</td><td>79.42</td><td>79.40</td><td>79.44</td><td>79.36</td></tr><tr><td>0.5</td><td>79.32</td><td>79.43</td><td>79.41</td><td>79.30</td></tr></table>
|
| 397 |
+
|
| 398 |
+
Table 6: Validation results (AUC×100) of word-level prediction under varying regularization weights.
|
| 399 |
+
|
| 400 |
+
<table><tr><td>AUC\(\lambda_2\) \(\lambda_1\)</td><td>0.0</td><td>0.1</td><td>0.3</td><td>0.5</td></tr><tr><td>0.0</td><td>70.89</td><td>70.98</td><td>70.85</td><td>71.15</td></tr><tr><td>0.1</td><td>71.04</td><td>71.02</td><td>71.06</td><td>71.23</td></tr><tr><td>0.3</td><td>71.41</td><td>71.31</td><td>71.43</td><td>71.31</td></tr><tr><td>0.5</td><td>71.41</td><td>71.48</td><td>71.45</td><td>71.45</td></tr></table>
|
| 401 |
+
|
| 402 |
+
Table 7: Validation results (AUC $\times {100}$ ) of exercise-level prediction under varying regularization weights.
|
| 403 |
+
|
| 404 |
+
A For every submission:
|
| 405 |
+
|
| 406 |
+
A1. Did you describe the limitations of your work? Limitations
|
| 407 |
+
A2. Did you discuss any potential risks of your work? Ethical and Privacy Considerations
|
| 408 |
+
A3. Do the abstract and introduction summarize the paper's main claims? 3
|
| 409 |
+
A4. Have you used AI writing assistants when working on this paper? Left blank.
|
| 410 |
+
|
| 411 |
+
B Did you use or create scientific artifacts?
|
| 412 |
+
|
| 413 |
+
Appendix B.1
|
| 414 |
+
|
| 415 |
+
B1. Did you cite the creators of artifacts you used? Appendix B.1
|
| 416 |
+
B2. Did you discuss the license or terms for use and / or distribution of any artifacts? Not applicable. Left blank.
|
| 417 |
+
B3. Did you discuss if your use of existing artifact(s) was consistent with their intended use, provided that it was specified? For the artifacts you create, do you specify intended use and whether that is compatible with the original access conditions (in particular, derivatives of data accessed for research purposes should not be used outside of research contexts)? Not applicable. Left blank.
|
| 418 |
+
B4. Did you discuss the steps taken to check whether the data that was collected / used contains any information that names or uniquely identifies individual people or offensive content, and the steps taken to protect / anonymize it?
|
| 419 |
+
Appendix B.1
|
| 420 |
+
B5. Did you provide documentation of the artifacts, e.g., coverage of domains, languages, and linguistic phenomena, demographic groups represented, etc.? Not applicable. Left blank.
|
| 421 |
+
B6. Did you report relevant statistics like the number of examples, details of train / test / dev splits, etc. for the data that you used / created? Even for commonly-used benchmark datasets, include the number of examples in train / validation / test splits, as these provide necessary context for a reader to understand experimental results. For example, small differences in accuracy on large test sets may be significant, while on small test sets they may not be. Appendix B.1
|
| 422 |
+
|
| 423 |
+
C Did you run computational experiments?
|
| 424 |
+
|
| 425 |
+
Appendix B.2
|
| 426 |
+
|
| 427 |
+
C1. Did you report the number of parameters in the models used, the total computational budget (e.g., GPU hours), and computing infrastructure used? Appendix B.2
|
| 428 |
+
|
| 429 |
+
C2. Did you discuss the experimental setup, including hyperparameter search and best-found hyperparameter values? Appendix B.2
|
| 430 |
+
C3. Did you report descriptive statistics about your results (e.g., error bars around results, summary statistics from sets of experiments), and is it transparent whether you are reporting the max, mean, etc. or just a single run?
|
| 431 |
+
Appendix B.2
|
| 432 |
+
C4. If you used existing packages (e.g., for preprocessing, for normalization, or for evaluation), did you report the implementation, model, and parameter settings used (e.g., NLTK, Spacy, ROUGE, etc.)? Appendix B.2
|
| 433 |
+
|
| 434 |
+
D Did you use human annotators (e.g., crowdworkers) or research with human participants? Left blank.
|
| 435 |
+
|
| 436 |
+
D1. Did you report the full text of instructions given to participants, including e.g., screenshots, disclaimers of any risks to participants or annotators, etc.? No response.
|
| 437 |
+
D2. Did you report information about how you recruited (e.g., crowdsourcing platform, students) and paid participants, and discuss if such payment is adequate given the participants' demographic (e.g., country of residence)? No response.
|
| 438 |
+
D3. Did you discuss whether and how consent was obtained from people whose data you're using/curating? For example, if you collected data via crowdsourcing, did your instructions to crowdworkers explain how the data would be used? No response.
|
| 439 |
+
D4. Was the data collection protocol approved (or determined exempt) by an ethics review board? No response.
|
| 440 |
+
D5. Did you report the basic demographic and geographic characteristics of the annotator population that is the source of the data? No response.
|
adaptiveandpersonalizedexercisegenerationforonlinelanguagelearning/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:184aad6c41125ef0fdac1b1fcc6b7dca838f05c4afea65ff5de170cef912f612
|
| 3 |
+
size 482202
|
adaptiveandpersonalizedexercisegenerationforonlinelanguagelearning/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:238f91448a8d9779ce0cf73fba394d8d32e204e20ec219d16b95e45d21e2ef9b
|
| 3 |
+
size 579670
|
adkdattributiondrivenknowledgedistillationforlanguagemodelcompression/7a308754-c09b-4a37-a605-d2c7cd65fe75_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6dbdced8b1da1dd3154f03cc62b0e9b39aa9799521c9d9fcb678b8f6521b16f8
|
| 3 |
+
size 108166
|
adkdattributiondrivenknowledgedistillationforlanguagemodelcompression/7a308754-c09b-4a37-a605-d2c7cd65fe75_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f0a7f1cfe41774728f7b2c33a652b3fafbd451a5c81263b461bf9b66a4ebc166
|
| 3 |
+
size 131408
|
adkdattributiondrivenknowledgedistillationforlanguagemodelcompression/7a308754-c09b-4a37-a605-d2c7cd65fe75_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:bb9f0a8964a97c67fa18eb1176728721d51b24944cb8e0b85f636eb623036ce5
|
| 3 |
+
size 1440588
|
adkdattributiondrivenknowledgedistillationforlanguagemodelcompression/full.md
ADDED
|
@@ -0,0 +1,471 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# AD-KD: Attribution-Driven Knowledge Distillation for Language Model Compression
|
| 2 |
+
|
| 3 |
+
Siyue Wu $^{1}$ , Hongzhan Chen $^{1}$ , Xiaojun Quan $^{1*}$ , Qifan Wang $^{2}$ and Rui Wang $^{3}$
|
| 4 |
+
|
| 5 |
+
$^{1}$ School of Computer Science and Engineering, Sun Yat-sen University, China $^{2}$ Meta AI
|
| 6 |
+
|
| 7 |
+
$^{3}$ Vipshop (China) Co., Ltd., China
|
| 8 |
+
|
| 9 |
+
$^{1}$ {wusy39, chenhzh59}@mail2.sysu.edu.cn, quanxj3@mail.sysu.edu.cn
|
| 10 |
+
|
| 11 |
+
wqfcr@fb.com
|
| 12 |
+
|
| 13 |
+
$^{3}$ mars198356@hotmail.com
|
| 14 |
+
|
| 15 |
+
# Abstract
|
| 16 |
+
|
| 17 |
+
Knowledge distillation has attracted a great deal of interest recently to compress pre-trained language models. However, existing knowledge distillation methods suffer from two limitations. First, the student model simply imitates the teacher's behavior while ignoring the underlying reasoning. Second, these methods usually focus on the transfer of sophisticated model-specific knowledge but overlook data-specific knowledge. In this paper, we present a novel attribution-driven knowledge distillation approach, which explores the token-level rationale behind the teacher model based on Integrated Gradients (IG) and transfers attribution knowledge to the student model. To enhance the knowledge transfer of model reasoning and generalization, we further explore multi-view attribution distillation on all potential decisions of the teacher. Comprehensive experiments are conducted with BERT on the GLUE benchmark. The experimental results demonstrate the superior performance of our approach to several state-of-the-art methods.
|
| 18 |
+
|
| 19 |
+
# 1 Introduction
|
| 20 |
+
|
| 21 |
+
Transformer-based pre-trained language models (PLMs), such as BERT (Devlin et al., 2019) and RoBERTa (Liu et al., 2019), have aroused widespread interest among Natural Language Processing (NLP) researchers in recent years. These language models are first pre-trained on large-scale unlabeled corpora to learn the general representation of language, and then fine-tuned on specific downstream tasks to effectively transfer the general knowledge to target domains. This pre-training and fine-tuning paradigm leads to state-of-the-art performances in various NLP tasks such as natural language understanding. However, with the rapid growth of the model scale, the deployment of large-scale PLMs becomes challenging, especially in low-resource scenarios. To this end, a variety
|
| 22 |
+
|
| 23 |
+

|
| 24 |
+
Figure 1: An example from the QNLI dataset (Rajpurkar et al., 2016) to illustrate different knowledge distillation techniques including the proposed attribution-driven method. Darker colors mean larger attribution scores.
|
| 25 |
+
|
| 26 |
+
of model compression techniques have been developed. Among them, knowledge distillation (KD) (Hinton et al., 2015) is a newly emerging technology that aims to obtain a small student model by distilling knowledge from a large teacher model and achieve comparable performance.
|
| 27 |
+
|
| 28 |
+
Existing knowledge distillation methods can be divided into three categories, namely response-based, feature-based, and relation-based (Gou et al., 2021). While response-based methods (Turc et al., 2019) directly distill the final output, e.g. probability distribution, from the top of the teacher, feature-based (Sun et al., 2019) and relation-based methods (Liu et al., 2022) try to align the features from intermediate layers of teacher and student models and minimize the difference. To transfer comprehensive knowledge from the teacher, a common practice is to combine response-based methods with the other two (Park et al., 2021). However, due to the capacity gap between the teacher and the student, feature-based and relation-based methods may not necessarily bring improvement to response-based methods (Liang et al., 2022). To sum up, existing knowledge distillation methods have two limitations. First, they mainly focus on understanding what the teacher's behavior is, instead of why the teacher behaves like this, hinder
|
| 29 |
+
|
| 30 |
+
ing the reasoning and generalization ability of the student model. Second, they pay more attention to distilling sophisticated model-specific knowledge from intermediate layers but neglect data-specific knowledge, which may contain valuable rationale information to understand how the teacher model arrives at a prediction.
|
| 31 |
+
|
| 32 |
+
To address the above limitations, in this paper we propose a novel Attribution-Driven Knowledge Distillation (AD-KD) approach that transfers attribution-based knowledge from the teacher to the student. As shown in Figure 1, the attribution information reflects the importance of different tokens towards the prediction, which contains reasoning knowledge of the model and can be complementary to the soft-label knowledge. By transferring such attribution knowledge, the student is allowed to learn the token-level rationale behind the teacher's behavior and thus generalizes better. Specifically, we utilize Integrated Gradients (IG) (Sundararajan et al., 2017), a well-established gradient-based attribution method, to calculate the importance score of each input token. To reduce the influence of trivial dimensions in the teacher's input embeddings, we further adopt the top- $K$ strategy to filter out dimensions with low attribution scores. The remaining attribution scores are aggregated and normalized to denote the importance of individual tokens. Moreover, we extract the attribution knowledge for all possible predictions rather than just the prediction with the highest probability. By transferring the multi-view attribution knowledge, the student learns a more comprehensive understanding of the teacher's soft-label distribution.
|
| 33 |
+
|
| 34 |
+
Extensive experiments are conducted with BERT (Devlin et al., 2019) on the GLUE benchmark (Wang et al., 2018). The experimental results demonstrate the effectiveness and superiority of our approach over several state-of-the-art baselines. Furthermore, we show that attribution knowledge from different layers contains different information, while the input layer contains the most prominent attribution knowledge for distillation. To summarize, the main contributions are threefold. First, we propose a novel attribution-driven knowledge distillation framework for language model compression that effectively transfers attribution knowledge from the teacher to the student. Second, we extract multi-view attribution knowledge based on model predictions to learn comprehensive reasoning knowledge. Third, we systematically validate
|
| 35 |
+
|
| 36 |
+
AD-KD on the GLUE benchmark and show its superior performance over state-of-the-art baselines.
|
| 37 |
+
|
| 38 |
+
# 2 Related Work
|
| 39 |
+
|
| 40 |
+
# 2.1 Knowledge Distillation
|
| 41 |
+
|
| 42 |
+
Knowledge distillation methods can be divided into three categories, namely response-based, feature-based and relation-based KD (Gou et al., 2021). Response-based KD was first proposed by Hinton et al. (2015), where the final output is adopted to transfer the label knowledge. Sanh et al. (2019) and Turc et al. (2019) applied this idea to BERT and yielded smaller models with minor performance drops. Recently, feature-based and relation-based distillation methods have drawn a lot of attention, which transfer knowledge contained in the intermediate layers to the student. For feature-based methods, Sun et al. (2019) first regarded the hidden representations of the [CLS] token as hints to extract sentence-level features from the teacher. Jiao et al. (2020) and Sun et al. (2020b) further matched the hidden representations of all tokens between teacher and student models. Sun et al. (2020a) proposed contrastive distillation on intermediate representations. As for relation-based methods, Park et al. (2021) proposed CKD which adopts pair-wise distance and triple-wise angle to model the sophisticated relations among token representations from both horizontal and vertical directions. Based on CKD, Liu et al. (2022) further extracted structural relations from multi-granularity representations and distilled this kind of well-organized multi-granularity structural knowledge hierarchically across layers. Wang et al. (2020, 2021) generalized the conventional query-key attention to query-query attention, key-key attention, and value-value attention. Different from these methods, we investigate knowledge distillation from the attribution perspective, which reveals the teacher's reasoning behavior and can be used to transfer comprehensive data-specific knowledge. More details about the differences between existing methods and ours are discussed in Appendix B.
|
| 43 |
+
|
| 44 |
+
# 2.2 Attribution
|
| 45 |
+
|
| 46 |
+
Attribution analysis (Baehrens et al., 2010; Ancona et al., 2018) aims at assigning importance scores to intermediate or input features of a network. Occlusion-based methods (Zeiler and Fergus, 2014) compute the importance score of each feature by erasing that feature and measuring the
|
| 47 |
+
|
| 48 |
+
difference between new output and the original output. However, occlusion-based methods need to forward pass the model once for each feature, leading to low computational efficiency. To address this issue, gradient-based methods (Li et al., 2016; Ding et al., 2019; Brunner et al., 2020; Sundararajan et al., 2017) exploit the gradient information of features to approximate occlusion-based methods, which only require a single forward process. Similarly, propagation-based methods (Bach et al., 2015; Shrikumar et al., 2017) modify the back-propagation rules to redistribute the model output among the target features along the back-propagation path. Perturbation-based methods (Guan et al., 2019; Schulz et al., 2020; De Cao et al., 2020) add noise to features to examine their importance for model predictions. Attribution has been adopted in model compression techniques such as pruning (Michel et al., 2019) and adaptive inference (Modarressi et al., 2022) but has not been explored in knowledge distillation. In this work, we take the initiative to investigate the effect of attribution in knowledge distillation.
|
| 49 |
+
|
| 50 |
+
# 3 Methodology
|
| 51 |
+
|
| 52 |
+
# 3.1 Preliminary
|
| 53 |
+
|
| 54 |
+
Integrated Gradients (Sundararajan et al., 2017) is a theoretically tenable method to attribute the prediction of a deep network to its input or intermediate features. Formally, given a feature $\mathbf{x} = [x_{1}, x_{2}, \dots, x_{n}] \in \mathbb{R}^{n}$ with a baseline feature $\mathbf{x}' = [x_{1}', x_{2}', \dots, x_{n}'] \in \mathbb{R}^{n}$ , and the model function $F(\cdot)$ , IG leverages integral to represent the difference between $F(\mathbf{x})$ and $F(\mathbf{x}')$ by selecting a straight line path from $\mathbf{x}'$ to $\mathbf{x}$ as the integral path:
|
| 55 |
+
|
| 56 |
+
$$
|
| 57 |
+
F (\mathbf {x}) - F \left(\mathbf {x} ^ {\prime}\right) = \sum_ {i = 1} ^ {n} \operatorname {IG} _ {i} (F, \mathbf {x}) = \tag {1}
|
| 58 |
+
$$
|
| 59 |
+
|
| 60 |
+
$$
|
| 61 |
+
\sum_ {i = 1} ^ {n} [ (x _ {i} - x _ {i} ^ {\prime}) \times \int_ {\alpha = 0} ^ {1} \frac {\partial F (x ^ {\prime} + \alpha \times (x - x ^ {\prime}))}{\partial x _ {i}} d \alpha ].
|
| 62 |
+
$$
|
| 63 |
+
|
| 64 |
+
In practice, continual integral can be approximated by discrete summation:
|
| 65 |
+
|
| 66 |
+
$$
|
| 67 |
+
\operatorname {IG} _ {i} ^ {\mathrm {approx}} (F, \mathbf {x}) =
|
| 68 |
+
$$
|
| 69 |
+
|
| 70 |
+
$$
|
| 71 |
+
\left(x _ {i} - x _ {i} ^ {\prime}\right) \times \sum_ {k = 1} ^ {m} \frac {\partial F \left(x ^ {\prime} + \frac {k}{m} \times \left(x - x ^ {\prime}\right)\right)}{\partial x _ {i}} \times \frac {1}{m}, \tag {2}
|
| 72 |
+
$$
|
| 73 |
+
|
| 74 |
+
where $m$ is the number of summation steps (a bigger $m$ usually results in better approximation). Intuitively, the magnitude of integrated gradient indicates its importance while its sign illustrates the positive or negative effect on the target output.
|
| 75 |
+
|
| 76 |
+
In this paper, we focus on Transformer-based architecture and attribute the model prediction to input features. With slight abuse of notation, we denote the input sequence as $\mathbf{x} = [x_1,x_2,\dots,x_n]$ , where $n$ is the sequence length and each $x_{i}$ represents a token. Transformer first converts the token sequence to $d$ -dimensional embedding sequence $\mathbf{E} = [\mathbf{e}_1,\mathbf{e}_2,\dots,\mathbf{e}_n]\in \mathbb{R}^{n\times d}$ through the embedding layer. And then the contextualized representations $\mathbf{H} = \mathrm{Transformer}(\mathbf{E})\in \mathbb{R}^{n\times d}$ are obtained after several layers of Transformer blocks. Finally, a task-specific head is applied on $\mathbf{H}$ to get the final output $P = [P_{1},P_{2},\ldots,P_{C}]\in \mathbb{R}^{C}$ , which is typically a probability distribution. Denote the mapping function $\mathbf{E}\rightarrow P_c$ as $F^c (.)$ , where $c$ represents the label of interest. In this case, our attribution map is computed on each individual dimension of each input embedding, which is denoted as $e_{ij}$ :
|
| 77 |
+
|
| 78 |
+
$$
|
| 79 |
+
\operatorname {IG} _ {i j} ^ {\mathrm {approx}} \left(F ^ {c}, \mathbf {E}\right) =
|
| 80 |
+
$$
|
| 81 |
+
|
| 82 |
+
$$
|
| 83 |
+
\left(e _ {i j} - e _ {i j} ^ {\prime}\right) \times \sum_ {k = 1} ^ {m} \frac {\partial F ^ {c} \left(\mathbf {E} ^ {\prime} + \frac {k}{m} \times \left(\mathbf {E} - \mathbf {E} ^ {\prime}\right)\right)}{\partial e _ {i j}} \times \frac {1}{m}. \tag {3}
|
| 84 |
+
$$
|
| 85 |
+
|
| 86 |
+
In the implementation, we stack $n$ [PAD] token embeddings as baseline features $\mathbf{E}'$ since they usually have no influence on the model prediction.
|
| 87 |
+
|
| 88 |
+
# 3.2 AD-KD
|
| 89 |
+
|
| 90 |
+
In this section, we elaborate on our proposed Attribution-Driven Knowledge Distillation (AD-KD), including attribution maps and attribution distillation. The overall framework of AD-KD is illustrated in Figure 2.
|
| 91 |
+
|
| 92 |
+
# 3.2.1 Attribution Maps
|
| 93 |
+
|
| 94 |
+
The attribution scores of a language model reflect the importance of different tokens towards the prediction, which contains valuable data-specific reasoning knowledge. The scores are computed among different tokens at different dimensions of a given model, using IG defined in Section 3.1. In this work, we do not take the sign into consideration, since the scores at different dimensions of the same token embedding would cannibalize each other when combining them into a token-level attribution score. This observation is consistent with the findings in (Atanasova et al., 2020).
|
| 95 |
+
|
| 96 |
+
When calculating the attribution scores, we observed that there exist certain dimensions whose attribution scores remain relatively low across different tokens. The attribution scores from these dimensions minimize the difference between important and unimportant tokens, which can be regarded
|
| 97 |
+
|
| 98 |
+

|
| 99 |
+
Figure 2: Overview of our AD-KD framework. The example in Figure 1 is taken as the input. AD-KD first extracts the attribution maps from the teacher model and then transfers the attribution-based knowledge to the student.
|
| 100 |
+
|
| 101 |
+
as noises. For better illustration, Figure 3 shows an example of sentence "seem weird and distanced" whose annotation is negative sentiment. It is clear that "weird" and "distance" are the keywords that contribute most to the prediction, whereas a proportion of dimensions of them present low attribution scores. To alleviate the influence of noisy dimensions in the input embeddings, we simply choose the top- $K$ dimensions with high attribution scores and filter out dimensions with low attribution scores. Formally, the attribution score of token $x_{i}$ with respect to the label $c$ in the teacher model can be calculated as:
|
| 102 |
+
|
| 103 |
+
$$
|
| 104 |
+
a _ {i} ^ {t, c} = \left\| \operatorname {TopK} \left(\mathbf {IG} _ {i} ^ {\mathrm {approx}} \left(F ^ {t, c}, \mathbf {E} ^ {t}\right)\right) \right\| _ {2}, \tag {4}
|
| 105 |
+
$$
|
| 106 |
+
|
| 107 |
+
where the superscript $t$ denotes the teacher model. Therefore, the attribution map of the teacher consists of a sequence of attribution scores:
|
| 108 |
+
|
| 109 |
+
$$
|
| 110 |
+
\mathbf {a} ^ {t, c} = \left[ a _ {1} ^ {t, c}, a _ {2} ^ {t, c}, \dots , a _ {n} ^ {t, c} \right]. \tag {5}
|
| 111 |
+
$$
|
| 112 |
+
|
| 113 |
+
For the student, the extraction of attribution map is similar except that we consider all dimensions for two reasons. First, it reduces the difficulty of training. Second, the student is allowed to learn from the noiseless attribution map of the teacher.
|
| 114 |
+
|
| 115 |
+
$$
|
| 116 |
+
a _ {i} ^ {s, c} = \left\| \mathbf {IG} _ {i} ^ {\mathrm {approx}} \left(F ^ {s, c}, \mathbf {E} ^ {s}\right) \right\| _ {2}, \tag {6}
|
| 117 |
+
$$
|
| 118 |
+
|
| 119 |
+
$$
|
| 120 |
+
\mathbf {a} ^ {s, c} = [ a _ {1} ^ {s, c}, a _ {2} ^ {s, c}, \dots , a _ {n} ^ {s, c} ].
|
| 121 |
+
$$
|
| 122 |
+
|
| 123 |
+
Considering that the teacher can make multiple decisions, each of which is associated with a probability, we further propose to extract multi-view attribution knowledge. Specifically, we extract the
|
| 124 |
+
|
| 125 |
+
attribution maps for all possible predictions of the model rather than a single prediction, e.g., the prediction with the maximum probability or the prediction corresponding to the ground-truth label. By transferring the multi-view attribution knowledge, the student can capture a more comprehensive understanding of the teacher's soft-label distribution. The multi-view attribution maps are defined as:
|
| 126 |
+
|
| 127 |
+
$$
|
| 128 |
+
\mathbf {A} ^ {t} = \big\Vert _ {c = 1} ^ {C} \mathbf {a} ^ {t, c}, \quad \mathbf {A} ^ {s} = \big\Vert _ {c = 1} ^ {C} \mathbf {a} ^ {s, c}, \tag {7}
|
| 129 |
+
$$
|
| 130 |
+
|
| 131 |
+
where $\parallel$ is the concatenation operation.
|
| 132 |
+
|
| 133 |
+
# 3.2.2 Attribution Distillation
|
| 134 |
+
|
| 135 |
+
Given the multi-view attribution maps, a straightforward strategy to transfer the knowledge is to directly minimize the difference between the two sets of maps in teacher and student models, with distance metrics like L2 distance (MSE):
|
| 136 |
+
|
| 137 |
+
$$
|
| 138 |
+
\left\| \mathbf {A} ^ {t} - \mathbf {A} ^ {s} \right\| _ {2}. \tag {8}
|
| 139 |
+
$$
|
| 140 |
+
|
| 141 |
+
However, one obvious shortcoming with this approach is that there may exist a magnitude gap between the attribution scores in teacher and student models at the early phase of distillation, since the teacher is already well-trained while the student has little attribution knowledge. Under this circumstance, the student is likely to fall into a local optimum. To enable smooth knowledge distillation, we normalize the attribution maps before minimizing the difference. Concretely, we first transform the single-view attribution maps into unit vectors:
|
| 142 |
+
|
| 143 |
+
$$
|
| 144 |
+
\widetilde {\mathbf {a}} ^ {t, c} = \frac {\mathbf {a} ^ {t , c}}{\| \mathbf {a} ^ {t , c} \| _ {2}}, \widetilde {\mathbf {a}} ^ {s, c} = \frac {\mathbf {a} ^ {s , c}}{\| \mathbf {a} ^ {s , c} \| _ {2}}. \tag {9}
|
| 145 |
+
$$
|
| 146 |
+
|
| 147 |
+

|
| 148 |
+
(a) seem
|
| 149 |
+
|
| 150 |
+

|
| 151 |
+
(b) weird
|
| 152 |
+
|
| 153 |
+

|
| 154 |
+
(c) and
|
| 155 |
+
Figure 3: An example from the SST-2 dataset (Socher et al., 2013). Given the sentence "seem weird and distanced" and its sentiment label negative, the distributions of absolute attribution scores among different tokens and dimensions are shown in subfigures (a)-(e). The model is a well-trained $\mathrm{BERT}_{\text {base }}$ (teacher) and the IG steps $m$ is set to 1.
|
| 156 |
+
|
| 157 |
+

|
| 158 |
+
(d) distance
|
| 159 |
+
|
| 160 |
+

|
| 161 |
+
(e) ##d
|
| 162 |
+
|
| 163 |
+
Then we reformulate the normalized multi-view attribution maps in Eq. (7) as:
|
| 164 |
+
|
| 165 |
+
$$
|
| 166 |
+
\widetilde {\mathbf {A}} ^ {t} = \big\Vert _ {c = 1} ^ {C} \widetilde {\mathbf {a}} ^ {t, c}, \quad \widetilde {\mathbf {A}} ^ {s} = \big\Vert _ {c = 1} ^ {C} \widetilde {\mathbf {a}} ^ {s, c}. \tag {10}
|
| 167 |
+
$$
|
| 168 |
+
|
| 169 |
+
The normalized attribution maps only preserve the information of relative importance among tokens regardless of their absolute importance, which we believe is the crucial knowledge to transfer. Finally, we define the attribution distillation loss as:
|
| 170 |
+
|
| 171 |
+
$$
|
| 172 |
+
\mathcal {L} _ {\mathrm {attr}} = \left\| \widetilde {\mathbf {A}} ^ {t} - \widetilde {\mathbf {A}} ^ {s} \right\| _ {2}. \tag {11}
|
| 173 |
+
$$
|
| 174 |
+
|
| 175 |
+
# 3.2.3 Overall Objective
|
| 176 |
+
|
| 177 |
+
We combine the original cross-entropy loss between the output of the student and the ground-truth label, the response-based loss (on the logits) (Hinton et al., 2015), and the proposed attribution-driven distillation loss to train the student model. The overall objective is defined as:
|
| 178 |
+
|
| 179 |
+
$$
|
| 180 |
+
\mathcal {L} = (1 - \alpha) \mathcal {L} _ {ce} + \alpha \mathcal {L} _ {\mathrm {logit}} + \beta \mathcal {L} _ {\mathrm {attr}}, \tag {12}
|
| 181 |
+
$$
|
| 182 |
+
|
| 183 |
+
where $\mathcal{L}_{ce} = -\log \sigma (z^s)[y]$ is the cross-entropy loss and $\mathcal{L}_{\mathrm{logit}} = \mathrm{KL}(\sigma (\frac{z^t}{\tau})||\sigma (\frac{z^s}{\tau}))$ is the loss on the output logits. And, $\alpha$ and $\beta$ are two hyperparameters, $\sigma$ is the softmax function, $y$ is the ground-truth label, $\tau$ is the temperature, and $z^t$ and $z^s$ are the output logits of the teacher and student models, respectively. $\mathrm{KL}(\cdot)$ denotes the KL-divergence.
|
| 184 |
+
|
| 185 |
+
# 4 Experimental Settings
|
| 186 |
+
|
| 187 |
+
# 4.1 Datasets
|
| 188 |
+
|
| 189 |
+
We evaluate our method on eight tasks of the GLUE benchmark (Wang et al., 2018), including CoLA (Warstadt et al., 2019), MNLI (Williams et al., 2018), SST-2 (Socher et al., 2013), QNLI (Rajpurkar et al., 2016), MRPC (Dolan and Brockett, 2005), QQP (Chen et al., 2018), RTE (Bentivogli et al., 2009) and STS-B (Cer et al., 2017). The details of these datasets are introduced in Appendix
|
| 190 |
+
|
| 191 |
+
A.1. For evaluation metrics, we follow previous works (Park et al., 2021; Liu et al., 2022) and report accuracy on MNLI, SST-2, QNLI, QQP and RTE, F1 score on MRPC, Matthews correlation coefficient on CoLA, and Spearman's rank correlation coefficient on STS-B.
|
| 192 |
+
|
| 193 |
+
# 4.2 Baseline Methods
|
| 194 |
+
|
| 195 |
+
We compare AD-KD with response-based KD methods and several state-of-the-art feature-based and relation-based KD methods. Response-based baselines include Vanilla KD (Hinton et al., 2015) and PD (Turc et al., 2019). Feature-based and relation-based baselines include PKD (Sun et al., 2019) which distills the hidden representations, TinyBERT (Jiao et al., 2020) which distills the self-attention matrices, and CKD (Park et al., 2021) and MGSKD (Liu et al., 2022) which distill the relation between hidden representations. For a fair comparison, MiniLM (Wang et al., 2020, 2021) and MobileBERT (Sun et al., 2020b) are not presented due to their two-stage distillation settings which involve both task-agnostic and task-specific distillation. Our AD-KD focuses on task-specific distillation and does not augment the training sets. Moreover, MGSKD (Liu et al., 2022) only reports results on a 4-layer BERT student model which is different from other baselines. To ensure a fair comparison, we re-implemented MGSKD using their released code to obtain a 6-layer student model. The original MGSKD approach also relies on span-level information that is extracted from external knowledge sources, which is not publicly available nor included in other baselines. Therefore, we did not use this external knowledge in our re-implementation of MGSKD.
|
| 196 |
+
|
| 197 |
+
# 4.3 Implementation Details
|
| 198 |
+
|
| 199 |
+
Our code is implemented in Pytorch with the Transformers package (Wolf et al., 2020). We fine
|
| 200 |
+
|
| 201 |
+
<table><tr><td>Model</td><td>#Params</td><td>CoLA (Mcc)</td><td>MNLI-(m/mm) (Acc)</td><td>SST-2 (Acc)</td><td>QNLI (Acc)</td><td>MRPC (F1)</td><td>QQP (Acc)</td><td>RTE (Acc)</td><td>STS-B (Spear)</td><td>Avg</td></tr><tr><td colspan="11">Dev</td></tr><tr><td>BERTbase(Teacher)</td><td>110M</td><td>60.3</td><td>84.9/84.8</td><td>93.7</td><td>91.7</td><td>91.4</td><td>91.5</td><td>69.7</td><td>89.4</td><td>84.1</td></tr><tr><td>BERT6(Student)</td><td>66M</td><td>51.2</td><td>81.7/82.6</td><td>91.0</td><td>89.3</td><td>89.2</td><td>90.4</td><td>66.1</td><td>88.3</td><td>80.9</td></tr><tr><td>Vanilla KD(Hinton et al., 2015)</td><td>66M</td><td>53.6</td><td>82.7/83.1</td><td>91.1</td><td>90.1</td><td>89.4</td><td>90.5</td><td>66.8</td><td>88.7</td><td>81.6</td></tr><tr><td>PD(Turc et al., 2019)</td><td>66M</td><td>-</td><td>82.5/83.4</td><td>91.1</td><td>89.4</td><td>89.4</td><td>90.7</td><td>66.7</td><td>-</td><td>-</td></tr><tr><td>PKD(Sun et al., 2019)</td><td>66M</td><td>45.5</td><td>81.3/-</td><td>91.3</td><td>88.4</td><td>85.7</td><td>88.4</td><td>66.5</td><td>86.2</td><td>79.2</td></tr><tr><td>TinyBERT(Jiao et al., 2020)</td><td>66M</td><td>53.8</td><td>83.1/83.4</td><td>92.3</td><td>89.9</td><td>88.8</td><td>90.5</td><td>66.9</td><td>88.3</td><td>81.7</td></tr><tr><td>CKD(Park et al., 2021)</td><td>66M</td><td>55.1</td><td>83.6/84.1</td><td>93.0</td><td>90.5</td><td>89.6</td><td>91.2</td><td>67.3</td><td>89.0</td><td>82.4</td></tr><tr><td>MGSKD(Liu et al., 2022)</td><td>66M</td><td>49.1</td><td>83.3/83.9</td><td>91.7</td><td>90.3</td><td>89.8</td><td>91.2</td><td>67.9</td><td>88.5</td><td>81.5</td></tr><tr><td>AD-KD</td><td>66M</td><td>58.3</td><td>83.4/84.2</td><td>91.9</td><td>91.2</td><td>91.2</td><td>91.2</td><td>70.9</td><td>89.2</td><td>83.4</td></tr><tr><td 
colspan="11">Test</td></tr><tr><td>BERTbase(Teacher)</td><td>110M</td><td>51.5</td><td>84.5/84.1</td><td>94.1</td><td>90.9</td><td>87.7</td><td>89.2</td><td>67.5</td><td>85.5</td><td>81.4</td></tr><tr><td>BERT6(Student)</td><td>66M</td><td>41.7</td><td>81.9/81.0</td><td>91.3</td><td>88.9</td><td>85.2</td><td>88.0</td><td>64.0</td><td>82.4</td><td>77.9</td></tr><tr><td>Vanilla KD(Hinton et al., 2015)</td><td>66M</td><td>42.3</td><td>82.7/81.8</td><td>92.0</td><td>89.3</td><td>86.3</td><td>88.2</td><td>65.0</td><td>82.7</td><td>78.6</td></tr><tr><td>PD(Turc et al., 2019)</td><td>66M</td><td>-</td><td>82.8/82.2</td><td>91.8</td><td>88.9</td><td>86.8</td><td>88.9</td><td>65.3</td><td>-</td><td>-</td></tr><tr><td>PKD(Sun et al., 2019)</td><td>66M</td><td>43.5</td><td>81.5/81.0</td><td>92.0</td><td>89.0</td><td>85.0</td><td>88.9</td><td>65.5</td><td>81.6</td><td>78.4</td></tr><tr><td>MGSKD(Liu et al., 2022)</td><td>66M</td><td>42.8</td><td>83.4/82.8</td><td>92.1</td><td>89.5</td><td>87.0</td><td>89.1</td><td>63.7</td><td>82.2</td><td>78.7</td></tr><tr><td>AD-KD</td><td>66M</td><td>47.0</td><td>83.1/82.6</td><td>91.8</td><td>90.0</td><td>87.1</td><td>88.9</td><td>65.8</td><td>83.4</td><td>79.6</td></tr></table>
|
| 202 |
+
|
| 203 |
+
Table 1: Overall results on the GLUE benchmark. The results of baselines except vanilla KD and MGSKD are imported from Park et al. (2021). Results of development sets are averaged over 3 runs and we submit the model with the highest score to the official GLUE server to obtain the results of test sets. Average score is computed excluding the MNLI-mm accuracy. The best results of the student models are shown in bold and the second best results are shown with underline. Results are statistically significant with p-value $< {0.005}$ .
|
| 204 |
+
|
| 205 |
+
tune $\mathrm{BERT}_{\mathrm{base}}$ as the teacher model, and utilize a smaller BERT released by Turc et al. (2019) with 6 Transformer layers, 768 hidden neurons and 12 attention heads to instantiate the student model following Park et al. (2021). We search for the optimal learning rate in $\{2\mathrm{e} - 5,3\mathrm{e} - 5,4\mathrm{e} - 5,5\mathrm{e} - 5\}$ , $\alpha$ in $\{0.8,0.9,1.0\}$ and temperature $\tau$ in $\{1,2,3,4\}$ . For the hyperparameter $\beta$ , we tune within $\{1,10,50,100\}$ . For the IG steps $m$ described in Section 3.1, we adopt $m = 1$ in the main results due to the huge computational overhead. Part of results with $m$ varying from 1 to 8 are reported in Section 5.4. $K$ is empirically searched within $\{384,512,640,700,734,768\}$ . Results with different values of $K$ are also reported. The detailed hyperparameter settings and training cost are provided in Appendix A.2. Our code is available at https://github.com/bruceesy/AD-KD.
|
| 206 |
+
|
| 207 |
+
# 5 Results and Analysis
|
| 208 |
+
|
| 209 |
+
# 5.1 Main Results
|
| 210 |
+
|
| 211 |
+
The main results are presented in Table 1. It can be seen that AD-KD outperforms all baselines on most of the datasets. Specifically, AD-KD yields an average improvement of 1.0 and 1.9 points over CKD and MGSKD respectively on development sets, and another average improvement of 0.9 points over MGSKD on test sets. Note that other feature-
|
| 212 |
+
|
| 213 |
+
based and relation-based KD methods even underperform vanilla KD, indicating the difficulty of aligning the teacher and the student at intermediate layers. In contrast, AD-KD distills the attribution knowledge from a global perspective which is more data-specific and shows significant improvement over vanilla KD. We provide two cases in Appendix C.3 to intuitively demonstrate the strength of AD-KD. We also observe that AD-KD does not show a satisfying performance on SST-2. We believe the reason is that the sentences in SST-2 are much shorter than those in other datasets, and in this case, the student is likely to already capture the attribution knowledge implicitly from the soft-labels of the teacher (Zhang et al., 2022).
|
| 214 |
+
|
| 215 |
+
# 5.2 Ablation Study
|
| 216 |
+
|
| 217 |
+
Impact of Loss Terms To analyze the impact of different loss terms, we conduct ablation experiments on three variants of AD-KD: (1) AD-KD without attribution distillation (i.e., vanilla KD), (2) AD-KD without the original cross-entropy loss, and (3) AD-KD without logit distillation. As reported in Table 2, again we observe an obvious performance drop after removing the attribution distillation. We also note that removing either the conventional cross-entropy loss or logit distillation loss causes noticeable performance degradation, suggesting both of them contribute to the improve
|
| 218 |
+
|
| 219 |
+
<table><tr><td>Method</td><td>CoLA (Mcc)</td><td>MNLI-(m/mm) (Acc)</td><td>SST-2 (Acc)</td><td>QNLI (Acc)</td><td>MRPC (F1)</td><td>QQP (Acc)</td><td>RTE (Acc)</td><td>STS-B (Spear)</td></tr><tr><td>AD-KD</td><td>58.3</td><td>83.4/84.2</td><td>91.9</td><td>91.2</td><td>91.2</td><td>91.2</td><td>70.9</td><td>89.2</td></tr><tr><td>w/o Lattr</td><td>53.6</td><td>82.7/83.1</td><td>91.2</td><td>90.2</td><td>89.2</td><td>90.5</td><td>67.5</td><td>88.9</td></tr><tr><td>w/o Lce</td><td>57.8</td><td>83.6/84.1</td><td>91.3</td><td>90.8</td><td>90.8</td><td>91.2</td><td>69.3</td><td>88.9</td></tr><tr><td>w/o Logit</td><td>53.9</td><td>81.9/82.8</td><td>91.1</td><td>90.5</td><td>89.9</td><td>90.9</td><td>68.6</td><td>88.8</td></tr></table>
|
| 220 |
+
|
| 221 |
+
Table 2: Ablation study of different loss terms. The results are based on GLUE development sets.
|
| 222 |
+
|
| 223 |
+

|
| 224 |
+
Figure 4: Ablation study of multi-view attribution on the MNLI development set.
|
| 225 |
+
|
| 226 |
+
ment of AD-KD. Nevertheless, our attribution distillation contributes most to the performance of AD-KD, showing that data-specific reasoning information is crucial in knowledge distillation.
|
| 227 |
+
|
| 228 |
+
Multi-view Attribution In AD-KD, the student learns the attribution knowledge from a variety of possible outputs to get a better understanding of the teacher. Here we study how the number of attribution views affects the final results. Experiments are conducted on MNLI which is a multi-classification task including three labels: entailment, contradiction, and neutral. We make a comparison between multi-view attribution and single-view attribution w.r.t. each candidate label respectively. The results are shown in Figure 4, from which we note that each of the single-view attributions plays a positive role and is superior to vanilla KD. Moreover, combining all attribution views yields further performance improvement, demonstrating that multiview attribution is more preferable for distillation.
|
| 229 |
+
|
| 230 |
+
Student Model Size To investigate whether AD-KD can boost the performance across different sizes of student, we further compare AD-KD with vanilla KD on MRPC and QNLI under various student scales provided by Turc et al. (2019). As observed in Figure 5, AD-KD consistently outperforms vanilla KD, which validates the effectiveness and stability of our approach.
|
| 231 |
+
|
| 232 |
+
# 5.3 Impact of Top- $K$
|
| 233 |
+
|
| 234 |
+
Recall that in order to eliminate the interference of noisy dimensions, AD-KD adopts the top- $K$ ap
|
| 235 |
+
|
| 236 |
+

|
| 237 |
+
|
| 238 |
+

|
| 239 |
+
Figure 5: Results of AD-KD and vanilla KD on MRPC and QNLI development sets at different student scales.
|
| 240 |
+
|
| 241 |
+
proach on the input embeddings of the teacher to filter out the dimensions with relatively low attribution scores. In this section, we conduct in-depth analysis on the impact of $K$ . We conduct experiments on STS-B and QNLI, and plot the results with different values of $K$ in Figure 6. As illustrated in the figure, the performance on the small dataset STS-B (7k) first improves as $K$ increases and then slightly degrades after $K$ exceeds 600. However, the performance on the larger dataset QNLI (108k) improves almost monotonically with the increasing of $K$ . We conjecture that choosing a suitable $K$ is beneficial on small datasets since there are probably more noisy dimensions in the input embeddings of the teacher, while preserving all dimensions may be preferable on larger datasets.
|
| 242 |
+
|
| 243 |
+
# 5.4 Impact of IG Steps
|
| 244 |
+
|
| 245 |
+
In our experiments, the IG steps $m$ are set to 1 by default when extracting the attribution maps. In this section, we provide more results with different values of $m$ in Figure 7 to understand its impact on distillation. We observe that as $m$ increases,
|
| 246 |
+
|
| 247 |
+

|
| 248 |
+
|
| 249 |
+

|
| 250 |
+
Figure 6: Results on STS-B and QNLI development sets as the number $(K)$ of retained dimensions changes.
|
| 251 |
+
Figure 7: Results on MRPC and QNLI development sets as the number $(m)$ of IG steps changes.
|
| 252 |
+
|
| 253 |
+
the performance of AD-KD fluctuates in a certain range. Although it is possible to find a point that surpasses our default setting and even the teacher, identifying the optimal value of $m$ for each task is costly since a large $m$ causes huge computational overhead. In contrast, $m = 1$ achieves a better trade-off between performance and computational cost.
|
| 254 |
+
|
| 255 |
+
<table><tr><td>Attribution Layer</td><td>MRPC (F1)</td><td>QNLI (Acc)</td></tr><tr><td>input</td><td>91.2</td><td>91.2</td></tr><tr><td>first</td><td>90.5</td><td>90.9</td></tr><tr><td>penultimate</td><td>90.4</td><td>90.9</td></tr><tr><td>uniform</td><td>90.6</td><td>91.1</td></tr><tr><td>input & uniform</td><td>90.1</td><td>90.6</td></tr></table>
|
| 256 |
+
|
| 257 |
+
Table 3: Results of different attribution layers on MRPC and QNLI development sets.
|
| 258 |
+
|
| 259 |
+
# 5.5 Attribution Distillation Layer
|
| 260 |
+
|
| 261 |
+
Apart from the attribution knowledge of input layer, the attribution knowledge of intermediate layers can also be transferred during distillation. To confirm the motivation that the former is better than the latter, we conduct experiments on MRPC and QNLI with different attribution layers. Specifically, we choose the first layer and the penultimate layer for comparison. Besides, we also try a uniform strategy which is widely adopted as the mapping function between the teacher and the student layers (Jiao et al., 2020; Park et al., 2021; Liu et al., 2022). From the results shown in Table 3, we see that uniform mapping strategy performs best among
|
| 262 |
+
|
| 263 |
+

|
| 264 |
+
|
| 265 |
+

|
| 266 |
+
Figure 8: Results on MRPC and QNLI development sets as $\alpha$ and $\beta$ changes.
|
| 267 |
+
|
| 268 |
+
intermediate layer methods. However, neither of these intermediate layers outperforms input layer, indicating that the attribution knowledge of intermediate layers is more model-specific and difficult to transfer. In addition, distilling the knowledge jointly from the input and the intermediate layers does not improve the performance.
|
| 269 |
+
|
| 270 |
+
# 5.6 Impact of $\alpha$ and $\beta$
|
| 271 |
+
|
| 272 |
+
For the training objective of AD-KD, we introduce $\alpha$ and $\beta$ to balance the original cross-entropy loss, logit distillation loss, and attribution distillation loss. To investigate their impact on model performance, we show the results of different values of $\alpha$ and $\beta$ on MRPC and QNLI in Figure 8, where we fix one while altering the other. We observe a unified trend across different tasks that when $\alpha$ is small, the student does not perform well due to the lack of response-based knowledge of the teacher, and when $\alpha$ is around 0.9, the student performs best. Therefore, we select $\alpha$ close to 1. We also observe from the figure that as $\beta$ increases, the performance first keeps improving and reaches the peak, then it starts to decline. Unlike $\alpha$ , however, the optimal value of $\beta$ varies with different tasks, indicating that $\beta$ is more sensitive to the task compared to $\alpha$ . More discussion of $\beta$ is given in Appendix C.2.
|
| 273 |
+
|
| 274 |
+
# 6 Conclusion
|
| 275 |
+
|
| 276 |
+
In this paper, we propose AD-KD, a novel knowledge distillation framework for language model compression. Unlike other distillation methods, AD-KD investigates the model knowledge from the perspective of input attribution, which is vital yet easy to transfer between the teacher and the
|
| 277 |
+
|
| 278 |
+
student. Moreover, top- $K$ method is adopted to obtain noiseless attribution maps among input tokens, and multi-view attribution is conducted for a more comprehensive distillation. To our knowledge, this is the first work that incorporates attribution into knowledge distillation. Extensive experiments including ablation studies are carried out to show the effectiveness of AD-KD and its components. With the recent emergence of large language models (LLMs), gradient-based attribution methods are infeasible due to the unavailable parameters. However, the idea of AD-KD can still be potentially extended to these black-box models by using occlusion-based attribution or using chain-of-thoughts (Wei et al., 2022) as the rationale for distillation. We will leave it to future work.
|
| 279 |
+
|
| 280 |
+
# Acknowledgements
|
| 281 |
+
|
| 282 |
+
This work was supported by the National Natural Science Foundation of China (No. 62176270), the Guangdong Basic and Applied Basic Research Foundation (No. 2023A1515012832), and the Program for Guangdong Introducing Innovative and Entrepreneurial Teams (No. 2017ZT07X355).
|
| 283 |
+
|
| 284 |
+
# Limitations
|
| 285 |
+
|
| 286 |
+
This work introduces the general idea of incorporating attribution into knowledge distillation, and there are three potential limitations. First, although AD-KD chooses Integrated Gradients for attribution, there are actually other attribution methods (Janizek et al., 2021; Sikdar et al., 2021) which can also be fitted in our framework. The question of whether these methods perform better than Integrated Gradients when combined with knowledge distillation is still unclear. Second, we conduct experiments on BERT of different scales and have not yet validated the effectiveness of AD-KD on other model structures. Third, while we only perform task-specific knowledge distillation in our experiments, applying AD-KD to task-agnostic knowledge distillation is also worth investigating.
|
| 287 |
+
|
| 288 |
+
# Ethics Statement
|
| 289 |
+
|
| 290 |
+
Our work will not cause ethical issues and the datasets used in this paper are publicly available.
|
| 291 |
+
|
| 292 |
+
# References
|
| 293 |
+
|
| 294 |
+
Marco Ancona, Enea Ceolini, Cengiz Öztireli, and Markus Gross. 2018. Towards better understand
|
| 295 |
+
|
| 296 |
+
ing of gradient-based attribution methods for deep neural networks. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30-May 3, 2018, Conference Track Proceedings. OpenReview.net.
|
| 297 |
+
Pepa Atanasova, Jakob Grue Simonsen, Christina Lioma, and Isabelle Augenstein. 2020. A diagnostic study of explainability techniques for text classification. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 3256-3274, Online. Association for Computational Linguistics.
|
| 298 |
+
Sebastian Bach, Alexander Binder, Gregoire Montavon, Frederick Klauschen, Klaus-Robert Müller, and Wojciech Samek. 2015. On pixel-wise explanations for non-linear classifier decisions by layer-wise relevance propagation. *PloS one*, 10(7):e0130140.
|
| 299 |
+
David Baehrens, Timon Schroeter, Stefan Harmeling, Motoaki Kawanabe, Katja Hansen, and Klaus-Robert Müller. 2010. How to explain individual classification decisions. The Journal of Machine Learning Research, 11:1803-1831.
|
| 300 |
+
Jasmijn Bastings and Katja Filippova. 2020. The elephant in the interpretability room: Why use attention as explanation when we have saliency methods? In Proceedings of the Third BlackboxNLP Workshop on Analyzing and Interpreting Neural Networks for NLP, pages 149-155, Online. Association for Computational Linguistics.
|
| 301 |
+
Luisa Bentivogli, Peter Clark, Ido Dagan, and Danilo Giampiccolo. 2009. The fifth pascal recognizing textual entailment challenge. In TAC.
|
| 302 |
+
Gino Brunner, Yang Liu, Damian Pascual, Oliver Richter, Massimiliano Ciaramita, and Roger Wattenhofer. 2020. On identifiability in transformers. In 8th International Conference on Learning Representations (ICLR 2020)(virtual). International Conference on Learning Representations.
|
| 303 |
+
Daniel Cer, Mona Diab, Eneko Agirre, Inigo Lopez-Gazpio, and Lucia Specia. 2017. SemEval-2017 task 1: Semantic textual similarity multilingual and crosslingual focused evaluation. In Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017), pages 1-14, Vancouver, Canada. Association for Computational Linguistics.
|
| 304 |
+
Zihan Chen, Hongbo Zhang, Xiaoji Zhang, and Leqi Zhao. 2018. Quora question pairs. URL https://www.kaggle.com/c/quora-question-pairs.
|
| 305 |
+
Nicola De Cao, Michael Sejr Schlichtkrull, Wilker Aziz, and Ivan Titov. 2020. How do decisions emerge across layers in neural models? interpretation with differentiable masking. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 3243-3255, Online. Association for Computational Linguistics.
|
| 306 |
+
|
| 307 |
+
Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.
|
| 308 |
+
Shuoyang Ding, Hainan Xu, and Philipp Koehn. 2019. Saliency-driven word alignment interpretation for neural machine translation. In Proceedings of the Fourth Conference on Machine Translation (Volume 1: Research Papers), pages 1-12, Florence, Italy. Association for Computational Linguistics.
|
| 309 |
+
William B. Dolan and Chris Brockett. 2005. Automatically constructing a corpus of sentential paraphrases. In Proceedings of the Third International Workshop on Paraphrasing (IWP2005).
|
| 310 |
+
Jianping Gou, Baosheng Yu, Stephen J Maybank, and Dacheng Tao. 2021. Knowledge distillation: A survey. International Journal of Computer Vision, 129(6):1789-1819.
|
| 311 |
+
Chaoyu Guan, Xiting Wang, Quanshi Zhang, Runjin Chen, Di He, and Xing Xie. 2019. Towards a deep and unified understanding of deep neural models in nlp. In International Conference on Machine Learning, pages 2454-2463. PMLR.
|
| 312 |
+
Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. 2015. Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531.
|
| 313 |
+
Sarthak Jain and Byron C. Wallace. 2019. Attention is not Explanation. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 3543-3556, Minneapolis, Minnesota. Association for Computational Linguistics.
|
| 314 |
+
Joseph D Janizek, Pascal Sturmfels, and Su-In Lee. 2021. Explaining explanations: Axiomatic feature interactions for deep networks. Journal of Machine Learning Research, 22:1-54.
|
| 315 |
+
Xiaoqi Jiao, Yichun Yin, Lifeng Shang, Xin Jiang, Xiao Chen, Linlin Li, Fang Wang, and Qun Liu. 2020. TinyBERT: Distilling BERT for natural language understanding. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 4163-4174, Online. Association for Computational Linguistics.
|
| 316 |
+
Daniel Khashabi, Snigdha Chaturvedi, Michael Roth, Shyam Upadhyay, and Dan Roth. 2018. Looking beyond the surface: A challenge set for reading comprehension over multiple sentences. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics:
|
| 317 |
+
|
| 318 |
+
Human Language Technologies, Volume 1 (Long Papers), pages 252-262, New Orleans, Louisiana. Association for Computational Linguistics.
|
| 319 |
+
Jiwei Li, Xinlei Chen, Eduard Hovy, and Dan Jurafsky. 2016. Visualizing and understanding neural models in NLP. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 681-691, San Diego, California. Association for Computational Linguistics.
|
| 320 |
+
Chen Liang, Simiao Zuo, Qingru Zhang, Pengcheng He, Weizhu Chen, and Tuo Zhao. 2022. Less is more: Task-aware layer-wise distillation for language model compression. arXiv preprint arXiv:2210.01351.
|
| 321 |
+
Chang Liu, Chongyang Tao, Jiazhan Feng, and Dongyan Zhao. 2022. Multi-granularity structural knowledge distillation for language model compression. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1001-1011, Dublin, Ireland. Association for Computational Linguistics.
|
| 322 |
+
Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692.
|
| 323 |
+
Paul Michel, Omer Levy, and Graham Neubig. 2019. Are sixteen heads really better than one? In Proceedings of the 33rd International Conference on Neural Information Processing Systems, pages 14037-14047.
|
| 324 |
+
Ali Modarressi, Hosein Mohebbi, and Mohammad Taher Pilehvar. 2022. AdapLeR: Speeding up inference by adaptive length reduction. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1-15, Dublin, Ireland. Association for Computational Linguistics.
|
| 325 |
+
Geondo Park, Gyeongman Kim, and Eunho Yang. 2021. Distilling linguistic context for language model compression. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 364-378, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
|
| 326 |
+
Damian Pascual, Gino Brunner, and Roger Wattenhofer. 2021. Telling BERT's full story: from local attention to global aggregation. In Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume, pages 105-124, Online. Association for Computational Linguistics.
|
| 327 |
+
Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. SQuAD: 100,000+ questions for machine comprehension of text. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 2383-2392, Austin, Texas. Association for Computational Linguistics.
|
| 328 |
+
|
| 329 |
+
Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. 2019. DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter. arXiv preprint arXiv:1910.01108.
|
| 330 |
+
Karl Schulz, Leon Sixt, Federico Tombari, and Tim Landgraf. 2020. Restricting the flow: Information bottlenecks for attribution. arXiv preprint arXiv:2001.00396.
|
| 331 |
+
Avanti Shrikumar, Peyton Greenside, and Anshul Kundaje. 2017. Learning important features through propagating activation differences. In International Conference on Machine Learning, pages 3145-3153. PMLR.
|
| 332 |
+
Sandipan Sikdar, Parantapa Bhattacharya, and Kieran Heese. 2021. Integrated directional gradients: Feature interaction attribution for neural NLP models. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 865-878, Online. Association for Computational Linguistics.
|
| 333 |
+
Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D. Manning, Andrew Ng, and Christopher Potts. 2013. Recursive deep models for semantic compositionality over a sentiment treebank. In Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing, pages 1631-1642, Seattle, Washington, USA. Association for Computational Linguistics.
|
| 334 |
+
Siqi Sun, Yu Cheng, Zhe Gan, and Jingjing Liu. 2019. Patient knowledge distillation for BERT model compression. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 4323-4332, Hong Kong, China. Association for Computational Linguistics.
|
| 335 |
+
Siqi Sun, Zhe Gan, Yuwei Fang, Yu Cheng, Shuohang Wang, and Jingjing Liu. 2020a. Contrastive distillation on intermediate representations for language model compression. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 498-508, Online. Association for Computational Linguistics.
|
| 336 |
+
Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou. 2020b. Mobile-BERT: a compact task-agnostic BERT for resource-limited devices. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 2158–2170, Online. Association for Computational Linguistics.
|
| 337 |
+
Mukund Sundararajan, Ankur Taly, and Qiqi Yan. 2017. Axiomatic attribution for deep networks. In International Conference on Machine Learning, pages 3319-3328. PMLR.
|
| 338 |
+
|
| 339 |
+
Iulia Turc, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Well-read students learn better: On the importance of pre-training compact models. arXiv preprint arXiv:1908.08962.
|
| 340 |
+
Alex Wang, Yada Pruksachatkun, Nikita Nangia, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R Bowman. 2019. Superglue: a stickier benchmark for general-purpose language understanding systems. In Proceedings of the 33rd International Conference on Neural Information Processing Systems, pages 3266-3280.
|
| 341 |
+
Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel Bowman. 2018. GLUE: A multi-task benchmark and analysis platform for natural language understanding. In Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 353-355, Brussels, Belgium. Association for Computational Linguistics.
|
| 342 |
+
Wenhui Wang, Hangbo Bao, Shaohan Huang, Li Dong, and Furu Wei. 2021. MiniLMv2: Multi-head self-attention relation distillation for compressing pretrained transformers. In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, pages 2140-2151, Online. Association for Computational Linguistics.
|
| 343 |
+
Wenhui Wang, Furu Wei, Li Dong, Hangbo Bao, Nan Yang, and Ming Zhou. 2020. Minilm: Deep self-attention distillation for task-agnostic compression of pre-trained transformers. Advances in Neural Information Processing Systems, 33:5776-5788.
|
| 344 |
+
Alex Warstadt, Amanpreet Singh, and Samuel R. Bowman. 2019. Neural network acceptability judgments. Transactions of the Association for Computational Linguistics, 7:625-641.
|
| 345 |
+
Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Ed Chi, Quoc Le, and Denny Zhou. 2022. Chain of thought prompting elicits reasoning in large language models. arXiv preprint arXiv:2201.11903.
|
| 346 |
+
Sarah Wiegreffe and Yuval Pinter. 2019. Attention is not not explanation. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 11-20, Hong Kong, China. Association for Computational Linguistics.
|
| 347 |
+
Adina Williams, Nikita Nangia, and Samuel Bowman. 2018. A broad-coverage challenge corpus for sentence understanding through inference. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 1112-1122, New Orleans, Louisiana. Association for Computational Linguistics.
|
| 348 |
+
|
| 349 |
+
Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Remi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander Rush. 2020. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Association for Computational Linguistics.
|
| 350 |
+
Song Xu, Haoran Li, Peng Yuan, Youzheng Wu, Xiaodong He, and Bowen Zhou. 2020. Self-attention guided copy mechanism for abstractive summarization. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 1355–1362, Online. Association for Computational Linguistics.
|
| 351 |
+
Matthew D Zeiler and Rob Fergus. 2014. Visualizing and understanding convolutional networks. In 13th European Conference on Computer Vision, ECCV 2014, pages 818-833. Springer Verlag.
|
| 352 |
+
Quanshi Zhang, Xu Cheng, Yilan Chen, and Zhefan Rao. 2022. Quantifying the knowledge in a cnn to explain knowledge distillation for classification. IEEE Transactions on Pattern Analysis & Machine Intelligence, (01):1-17.
|
| 353 |
+
|
| 354 |
+
# A Experimental Details
|
| 355 |
+
|
| 356 |
+
# A.1 Details of Datasets
|
| 357 |
+
|
| 358 |
+
We evaluate AD-KD on eight tasks of GLUE benchmark (Wang et al., 2018). Specifically, there are two single-sentence tasks: CoLA (Warstadt et al., 2019) which aims to predict if the given sentence is grammatically correct, and SST-2 (Socher et al., 2013) which aims to predict the sentiment of the given sentence; two paraphrase tasks: MRPC (Dolan and Brockett, 2005) which aims to predict if two given sentences are semantically equivalent, and QQP (Chen et al., 2018) which is similar to MRPC; three inference tasks which aim to predict if the premise entails the hypothesis: MNLI (Williams et al., 2018), QNLI (Rajpurkar et al., 2016), and RTE (Bentivogli et al., 2009); and one similarity task: STS-B (Cer et al., 2017) which aims to predict a continuous score measuring the semantic similarity between a pair of sentences. The statistics of these datasets are shown in Table 4.
|
| 359 |
+
|
| 360 |
+
<table><tr><td>Task</td><td>#Train</td><td>#Dev</td><td>#Test</td><td>#Label</td></tr><tr><td colspan="5">Single-Sentence Classification</td></tr><tr><td>CoLA</td><td>8.5k</td><td>1k</td><td>1k</td><td>2</td></tr><tr><td>SST-2</td><td>67k</td><td>872</td><td>1.8k</td><td>2</td></tr><tr><td colspan="5">Pairwise Text Classification</td></tr><tr><td>MNLI</td><td>393k</td><td>20k</td><td>20k</td><td>3</td></tr><tr><td>QNLI</td><td>108k</td><td>5.7k</td><td>5.7k</td><td>2</td></tr><tr><td>MRPC</td><td>3.7k</td><td>408</td><td>1.7k</td><td>2</td></tr><tr><td>QQP</td><td>364k</td><td>40k</td><td>391k</td><td>2</td></tr><tr><td>RTE</td><td>2.5k</td><td>276</td><td>3k</td><td>2</td></tr><tr><td colspan="5">Text Similarity</td></tr><tr><td>STS-B</td><td>7k</td><td>1.5k</td><td>1.4k</td><td>1</td></tr></table>
|
| 361 |
+
|
| 362 |
+
# A.2 Hyperparameter Settings
|
| 363 |
+
|
| 364 |
+
We run all experiments on GeForce RTX 2080 Ti GPUs. Table 5 presents the hyperparameter settings and training costs of AD-KD on GLUE tasks. Generally, AD-KD runs 1.2 to 3 times slower compared to vanilla KD on different tasks, due to the extra back-propagation. However, all students obtained by different distillation methods have the same inference speed.
|
| 365 |
+
|
| 366 |
+
# B More Discussion
|
| 367 |
+
|
| 368 |
+
In this section, we discuss the difference between distilling the attribution maps and distilling the attention matrices. In a sense, attention matrices are similar to attribution maps since they both reflect the contribution that each input token makes on a
|
| 369 |
+
|
| 370 |
+
Table 4: Statistics of the GLUE datasets.
|
| 371 |
+
|
| 372 |
+
<table><tr><td>Hyperparameter</td><td>CoLA</td><td>MNLI</td><td>SST-2</td><td>QNLI</td><td>MRPC</td><td>QQP</td><td>RTE</td><td>STS-B</td></tr><tr><td>Learning Rate</td><td>4e-5</td><td>4e-5</td><td>5e-5</td><td>4e-5</td><td>3e-5</td><td>4e-5</td><td>2e-5</td><td>5e-5</td></tr><tr><td>Total Batch Size</td><td>32</td><td>64</td><td>32</td><td>32</td><td>16</td><td>32</td><td>16</td><td>16</td></tr><tr><td>Max Seq. Length</td><td>128</td><td>128</td><td>128</td><td>128</td><td>128</td><td>128</td><td>128</td><td>128</td></tr><tr><td>α</td><td>0.9</td><td>0.8</td><td>0.8</td><td>0.9</td><td>0.9</td><td>1.0</td><td>0.9</td><td>0.8</td></tr><tr><td>β</td><td>1</td><td>10</td><td>1</td><td>100</td><td>10</td><td>50</td><td>10</td><td>1</td></tr><tr><td>τ</td><td>1</td><td>3</td><td>2</td><td>3</td><td>4</td><td>4</td><td>2</td><td>3</td></tr><tr><td>K</td><td>768</td><td>768</td><td>768</td><td>768</td><td>768</td><td>734</td><td>700</td><td>640</td></tr><tr><td>m</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td></tr><tr><td># GPU</td><td>1</td><td>4</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td></tr><tr><td>Training Time</td><td>30min</td><td>12hr</td><td>2.5hr</td><td>3hr</td><td>20min</td><td>16hr</td><td>12min</td><td>20min</td></tr></table>
|
| 373 |
+
|
| 374 |
+
model prediction to some extent (Bastings and Filippova, 2020; Xu et al., 2020). However, there are several drawbacks when it comes to distillation. On one hand, attention correlates well with attribution locally in specific layers and heads but not globally, indicating that attention maps are inadequate to draw conclusions that refer to the input of the model (Pascual et al., 2021). In other words, attention matrices are more like model-specific knowledge that are probably challenging for the student to learn due to the layer mapping issue, especially when the student has much fewer parameters than the teacher. On the other hand, some works point out that by adversarial training, alternative attention weights can be found whereas the prediction remains almost the same (Jain and Wallace, 2019; Wiegreffe and Pinter, 2019). Therefore, an optimal student does not necessarily share similar attention matrices with its teacher. Our proposed AD-KD adopts a more reliable gradient-based method to obtain the attribution maps, which is shown better than attention matrices employed by baselines.
|
| 375 |
+
|
| 376 |
+
# C More Experimental Results
|
| 377 |
+
|
| 378 |
+
# C.1 Results on MultiRC
|
| 379 |
+
|
| 380 |
+
Considering that the text in GLUE is relatively short (with Max_Seq_Length set to 128), we conduct additional experiments on SuperGLUE (Wang et al., 2019) for more comprehensive evaluation. We select a challenging QA task, MultiRC (Khashabi et al., 2018), with much longer text (with Max_Seq_Length set to 512) which requires more attribution knowledge. As shown in Table 6, AD-KD improves $0.97\%$ over vanilla KD and $0.38\%$ over MGSKD. Moreover, the performance of AD-KD is on par with the teacher.
|
| 381 |
+
|
| 382 |
+
Table 5: Hyperparameter settings and training cost.
|
| 383 |
+
|
| 384 |
+
<table><tr><td>Model</td><td>#Params</td><td>Acc</td></tr><tr><td>BERTbase(Teacher)</td><td>110M</td><td>68.53</td></tr><tr><td>Vanilla KD</td><td>66M</td><td>67.70</td></tr><tr><td>MGSKD</td><td>66M</td><td>68.29</td></tr><tr><td>AD-KD</td><td>66M</td><td>68.67</td></tr></table>
|
| 385 |
+
|
| 386 |
+
Table 6: Results on MultiRC development set.
|
| 387 |
+
|
| 388 |
+

|
| 389 |
+
(a) MRPC
|
| 390 |
+
|
| 391 |
+

|
| 392 |
+
(b) QNLI
|
| 393 |
+
Figure 9: Comparison of the attribution gap between teacher and student on training set and development set.
|
| 394 |
+
|
| 395 |
+
# C.2 Overfitting Study
|
| 396 |
+
|
| 397 |
+
In this section, we investigate whether the overfitting problem would happen in attribution distillation. Using Eq. (11), we calculate the attribution gap between the teacher and the student models on the training and development sets of MRPC and QNLI respectively, and show the results in Figure 9. By altering $\beta$ , the tendency of attribution gap on development sets is consistent with the one on training sets, which indicates that the attribution knowledge learned from training data can be well generalized to unseen data. Therefore, overfitting tends not to happen in attribution distillation.
|
| 398 |
+
|
| 399 |
+
# C.3 Case Study
|
| 400 |
+
|
| 401 |
+
In this section, we provide two examples to show how AD-KD facilitates the imitation of the teacher's reasoning and outperforms vanilla KD. As shown in Figure 10, vanilla KD makes mistakes by ignoring keyword *Louisiana* or emphasizing an irrelevant word *billion*. In contrast, the attribution maps of AD-KD are more consistent with the ones in the teacher. AD-KD learns what to and not to focus on and thus predicts the label correctly.
|
| 402 |
+
|
| 403 |
+
Case #1
|
| 404 |
+
|
| 405 |
+
<table><tr><td rowspan="3">Teacher</td><td>Question:</td><td>What land was ceded to Spain?</td></tr><tr><td>Sentence:</td><td>It ceded French louisiana west of the mississippi river including new orleans / to its / spain in compensation for spain / loss to britain of florida</td></tr><tr><td colspan="2">Prediction: Entailment</td></tr><tr><td rowspan="3">Vanilla KD</td><td>Question:</td><td>What land was ceded to Spain?</td></tr><tr><td>Sentence:</td><td>It ceded French louisiana west of the mississippi river including new orleans / to its / spain in compensation for spain / loss to britain of florida</td></tr><tr><td colspan="2">Prediction: Not entailment</td></tr><tr><td rowspan="3">AD-KD</td><td>Question:</td><td>What land was ceded to Spain?</td></tr><tr><td>Sentence:</td><td>It ceded French louisiana west of the mississippi river including new orleans / to its / spain in compensation for spain / loss to britain of florida</td></tr><tr><td colspan="2">Prediction: Entailment</td></tr></table>
|
| 406 |
+
|
| 407 |
+
Case #2
|
| 408 |
+
|
| 409 |
+
<table><tr><td rowspan="3">Teacher</td><td colspan="10">Question: what magnitude was the 1994 Northridge earthquake?</td></tr><tr><td>Sentence: it caused the most property damage of any earthquake in U.S.</td><td>history</td><td>estimated</td><td>at over</td><td>$</td><td>20 billion</td><td></td><td></td><td></td><td></td></tr><tr><td colspan="10">Prediction: Not entailment</td></tr><tr><td rowspan="3">Vanilla KD</td><td colspan="10">Question: what magnitude was the 1994 Northridge earthquake?</td></tr><tr><td>Sentence: it caused the most property damage of any earthquake in U.S.</td><td>history</td><td>estimated</td><td>at over</td><td>$</td><td>20 billion</td><td></td><td></td><td></td><td></td></tr><tr><td colspan="10">Prediction: Entailment</td></tr><tr><td rowspan="3">AD-KD</td><td colspan="10">Question: what magnitude was the 1994 Northridge earthquake?</td></tr><tr><td>Sentence: it caused the most property damage of any earthquake in U.S.</td><td>history</td><td>estimated</td><td>at over</td><td>$</td><td>20 billion</td><td></td><td></td><td></td><td></td></tr><tr><td colspan="10">Prediction: Not entailment</td></tr></table>
|
| 410 |
+
|
| 411 |
+
Figure 10: Two illustrative examples of attribution maps and predictions by teacher, vanilla KD and AD-KD from the QNLI development set, where darker colors mean larger attribution scores. In case #1, AD-KD learns which tokens to focus on (Louisiana), while in case #2, AD-KD learns which tokens not to focus on (billion).
|
| 412 |
+
|
| 413 |
+
A For every submission:
|
| 414 |
+
|
| 415 |
+
A1. Did you describe the limitations of your work? Section Limitations
|
| 416 |
+
A2. Did you discuss any potential risks of your work? Section Ethics Statement
|
| 417 |
+
A3. Do the abstract and introduction summarize the paper's main claims? Section Abstract and Section 1
|
| 418 |
+
A4. Have you used AI writing assistants when working on this paper? Left blank.
|
| 419 |
+
|
| 420 |
+
B Did you use or create scientific artifacts?
|
| 421 |
+
|
| 422 |
+
Section 4
|
| 423 |
+
|
| 424 |
+
B1. Did you cite the creators of artifacts you used? Section 4.1 and Section 4.3
|
| 425 |
+
B2. Did you discuss the license or terms for use and / or distribution of any artifacts? Not applicable. Left blank.
|
| 426 |
+
B3. Did you discuss if your use of existing artifact(s) was consistent with their intended use, provided that it was specified? For the artifacts you create, do you specify intended use and whether that is compatible with the original access conditions (in particular, derivatives of data accessed for research purposes should not be used outside of research contexts)? The datasets we use are consistent with those used in previous works.
|
| 427 |
+
B4. Did you discuss the steps taken to check whether the data that was collected / used contains any information that names or uniquely identifies individual people or offensive content, and the steps taken to protect / anonymize it? Not applicable. Left blank.
|
| 428 |
+
B5. Did you provide documentation of the artifacts, e.g., coverage of domains, languages, and linguistic phenomena, demographic groups represented, etc.? Section 4.1
|
| 429 |
+
B6. Did you report relevant statistics like the number of examples, details of train / test / dev splits, etc. for the data that you used / created? Even for commonly-used benchmark datasets, include the number of examples in train / validation / test splits, as these provide necessary context for a reader to understand experimental results. For example, small differences in accuracy on large test sets may be significant, while on small test sets they may not be. Appendix A.1
|
| 430 |
+
|
| 431 |
+
C Did you run computational experiments?
|
| 432 |
+
|
| 433 |
+
Section 5
|
| 434 |
+
|
| 435 |
+
C1. Did you report the number of parameters in the models used, the total computational budget (e.g., GPU hours), and computing infrastructure used? Appendix A.2
|
| 436 |
+
|
| 437 |
+
C2. Did you discuss the experimental setup, including hyperparameter search and best-found hyperparameter values?
|
| 438 |
+
|
| 439 |
+
Section 4.3 and Appendix A.2
|
| 440 |
+
|
| 441 |
+
C3. Did you report descriptive statistics about your results (e.g., error bars around results, summary statistics from sets of experiments), and is it transparent whether you are reporting the max, mean, etc. or just a single run?
|
| 442 |
+
|
| 443 |
+
Section 5.1
|
| 444 |
+
|
| 445 |
+
C4. If you used existing packages (e.g., for preprocessing, for normalization, or for evaluation), did you report the implementation, model, and parameter settings used (e.g., NLTK, Spacy, ROUGE, etc.)?
|
| 446 |
+
|
| 447 |
+
Section 4.3
|
| 448 |
+
|
| 449 |
+
D Did you use human annotators (e.g., crowdworkers) or research with human participants?
|
| 450 |
+
|
| 451 |
+
Left blank.
|
| 452 |
+
|
| 453 |
+
D1. Did you report the full text of instructions given to participants, including e.g., screenshots, disclaimers of any risks to participants or annotators, etc.?
|
| 454 |
+
|
| 455 |
+
No response.
|
| 456 |
+
|
| 457 |
+
D2. Did you report information about how you recruited (e.g., crowdsourcing platform, students) and paid participants, and discuss if such payment is adequate given the participants' demographic (e.g., country of residence)?
|
| 458 |
+
|
| 459 |
+
No response.
|
| 460 |
+
|
| 461 |
+
D3. Did you discuss whether and how consent was obtained from people whose data you're using/curating? For example, if you collected data via crowdsourcing, did your instructions to crowdworkers explain how the data would be used?
|
| 462 |
+
|
| 463 |
+
No response.
|
| 464 |
+
|
| 465 |
+
D4. Was the data collection protocol approved (or determined exempt) by an ethics review board?
|
| 466 |
+
|
| 467 |
+
No response.
|
| 468 |
+
|
| 469 |
+
D5. Did you report the basic demographic and geographic characteristics of the annotator population that is the source of the data?
|
| 470 |
+
|
| 471 |
+
No response.
|
adkdattributiondrivenknowledgedistillationforlanguagemodelcompression/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c8512d479115b376f623c843a2c3ac9eacc7154209afea90132406b85ae85d9f
|
| 3 |
+
size 786064
|
adkdattributiondrivenknowledgedistillationforlanguagemodelcompression/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3c4a818dc83fc13cbc7d40d0abac5a0e0da319375cd856a2e7ff334f58e19385
|
| 3 |
+
size 514112
|
advancingmulticriteriachinesewordsegmentationthroughcriterionclassificationanddenoising/654811cb-0e2a-4224-92f1-ee372104182c_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:57e0036a99df8dfa18af433103ae735666aa47f86bbf099a2a5a68b444721999
|
| 3 |
+
size 110731
|
advancingmulticriteriachinesewordsegmentationthroughcriterionclassificationanddenoising/654811cb-0e2a-4224-92f1-ee372104182c_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2af5f1eaf0076b41518448454d8c3e5af24189a2723fc599dc93b667296b35e9
|
| 3 |
+
size 125357
|
advancingmulticriteriachinesewordsegmentationthroughcriterionclassificationanddenoising/654811cb-0e2a-4224-92f1-ee372104182c_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ab29fb938b438292720a14653f4d0ca1dc9921de1e23098db30accbfea5ee807
|
| 3 |
+
size 568925
|
advancingmulticriteriachinesewordsegmentationthroughcriterionclassificationanddenoising/full.md
ADDED
|
@@ -0,0 +1,387 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Advancing Multi-Criteria Chinese Word Segmentation Through Criterion Classification and Denoising
|
| 2 |
+
|
| 3 |
+
Tzu-Hsuan Chou* and Chun-Yi Lin* and Hung-Yu Kao
|
| 4 |
+
|
| 5 |
+
Intelligent Knowledge Management Lab
|
| 6 |
+
|
| 7 |
+
Institute of Computer Science and Information Engineering
|
| 8 |
+
|
| 9 |
+
National Cheng Kung University
|
| 10 |
+
|
| 11 |
+
Tainan, Taiwan
|
| 12 |
+
|
| 13 |
+
ProFatXuanAll@gmail.com, NE6101050@gs.ncku.edu.tw,
|
| 14 |
+
|
| 15 |
+
hykao@mail.ncku.edu.tw
|
| 16 |
+
|
| 17 |
+
# Abstract
|
| 18 |
+
|
| 19 |
+
Recent research on multi-criteria Chinese word segmentation (MCCWS) mainly focuses on building complex private structures, adding more handcrafted features, or introducing complex optimization processes. In this work, we show that through a simple yet elegant inputhint-based MCCWS model, we can achieve state-of-the-art (SoTA) performances on several datasets simultaneously. We further propose a novel criterion-denoising objective that hurts slightly on F1 score but achieves SoTA recall on out-of-vocabulary words. Our result establishes a simple yet strong baseline for future MCCWS research. Source code is available at https://github.com/IKMLab/MCCWS.
|
| 20 |
+
|
| 21 |
+
# 1 Introduction
|
| 22 |
+
|
| 23 |
+
Chinese word segmentation (CWS) is a preliminary step for performing Chinese NLP tasks. Researchers have proposed many CWS datasets to enhance word segmentation performance in different text domains. However, due to the divergence in linguistic perspectives, the same text passage can be segmented in entirely different ways across datasets. For example, in their written forms, Chinese human names have no spaces in between. Some datasets segment human names into last and first names, while others leave human names as a whole (see Table 1). The simplest way to address such an issue is through single-criterion CWS (SCCWS) model, i.e., to train different models for different datasets. But the cost of maintaining multiple versions of the same model becomes cumbersome as recent deep learning models get deeper and larger. Thus, recent CWS works started to shift their focuses to multi-criterion Chinese word segmentation (MCCWS), which aims to fit one model for all CWS datasets (Chen et al., 2017; He et al.,
|
| 24 |
+
|
| 25 |
+
<table><tr><td>Dataset</td><td>Samples</td><td>Labels</td></tr><tr><td>PKU</td><td>江-泽民</td><td>S-BE</td></tr><tr><td>MSRA</td><td>江泽民</td><td>BME</td></tr><tr><td>AS</td><td>何-樂-而-不-為</td><td>S-S-S-S-S</td></tr><tr><td>CITYU</td><td>何樂而不為</td><td>BMMME</td></tr></table>
|
| 26 |
+
|
| 27 |
+
Table 1: Actual samples from SIGHAN bakeoff 2005 datasets (Emerson, 2005) demonstrating labeling inconsistency. The hyphen “-” denotes segmentation. Labels are defined in Section 3.1. In the first two rows, the human name 江泽民 (Jiang Zemin) in PKU dataset is segmented into the last name 江 (Jiang) and the first name 泽民 (Zemin), but not in MSRA dataset. In the last two rows, the idiom 何樂而不為 (Why not do something?) is segmented in AS dataset but not in CITYU dataset. More examples can be found in these datasets.
|
| 28 |
+
|
| 29 |
+
2019; Gong et al., 2019; Huang et al., 2020b,a; Ke et al., 2020; Qiu et al., 2020; Ke et al., 2021).
|
| 30 |
+
|
| 31 |
+
MCCWS can be seen as a multi-task learning problem (Chen et al., 2017) that benefits from leveraging large amounts of heterogeneous data, meanwhile dealing with subtle linguistic divergence. Prior works are mainly divided into private-structure-based and input-hint-based models. In a typical SCCWS workflow, an input character sequence is first converted to character embeddings and fed to an encoder to get contextualized representation. The encoder output is then passed to a decoder to generate the final prediction (see Figure 1(a)). In private-structure-based MCCWS, an encoder-decoder pair is created for each dataset, but an additional encoder is shared across datasets to better leverage general knowledge (see Figure 1(b)). In input-hint-based MCCWS, instead of creating private structures for each dataset, all datasets share one encoder-decoder pair, and a criterion-specific hint is given as part of the input (see Figure 1(c)). Despite its simplicity, input-hint-based MCCWS models outperform private-structure-based MCCWS models.
|
| 32 |
+
|
| 33 |
+
Proven to be simple and effective, the input
|
| 34 |
+
|
| 35 |
+

|
| 36 |
+
(a)
|
| 37 |
+
|
| 38 |
+

|
| 39 |
+
(b)
|
| 40 |
+
Figure 1: (a) Typical SCCWS model, (b) private-structure-based MCCWS model, and (c) input-hint-based MCCWS model. All three types of models share similar workflows. B, E, M, S are collectively defined as the output tagset of a CWS model (see Section 3.1). The character sequence "何樂而不為" (Why not do something?) is used as an input demonstration. [k] represents the criterion of the $k$ -th dataset and is served as an input hint. SCCWS and input-hint-based MCCWS models are nearly identical with the input being the only difference.
|
| 41 |
+
|
| 42 |
+

|
| 43 |
+
(c)
|
| 44 |
+
|
| 45 |
+
hint-based approach has become the most popular choice of recent MCCWS works (He et al., 2019; Gong et al., 2019; Huang et al., 2020a; Ke et al., 2020; Qiu et al., 2020; Ke et al., 2021). While existing works kept adding complex features and structures, we show that without such complexity, we can still achieve state-of-the-art (SoTA) results across 10 CWS datasets. We do this by jointly training MCCWS with a criterion classification objective on a simple model. In particular, we used a pre-trained Chinese BERT (Devlin et al., 2019) as our encoder and a softmax decoder. Neither handcrafted features nor complex non-greedy decoding algorithms were used.
|
| 46 |
+
|
| 47 |
+
One problem remains for input-hint-based MCCWS models. When fitting on a training set or evaluating a test set, each character sequence is sampled from a particular dataset, so one would always know which criterion-specific hint was given as input. However, when performing inference, one would not know the source of a given character sequence. Therefore, one has to choose the criterion in such cases manually. With hundreds of linguistic rules (Emerson, 2005), it is difficult for non-linguists to determine which criterion to use. Thus, inspired by the masked language model, we proposed a novel criterion-denoising objective to make our MCCWS model automatically choose a suitable criterion for each input. We show that adding such a denoising objective surprisingly retains near SoTA performance on the F1-score, and even outperforms SoTA performance on the recall of out-of-vocabulary (OOV) words.
|
| 48 |
+
|
| 49 |
+
# 2 Related Works
|
| 50 |
+
|
| 51 |
+
After Xue (2003) proposed to treat CWS as a character tagging problem, many works followed the same problem formulation to address CWS. Chen et al. (2017) is the first to propose a multi-criteria learning framework for CWS. They proposed multiple private-structure-based MCCWS models and trained them in an adversarial setting. A criterion discriminator was used in their adversarial training so that common knowledge across datasets could be shared through different private structures. But the nature of adversarial training forces their criterion discriminator to predict each criterion with equal probability (Goodfellow et al., 2014; Chen et al., 2017). Thus their criterion discriminator failed to provide accurate criterion prediction and cannot be used to choose a suitable criterion for each input.
|
| 52 |
+
|
| 53 |
+
Inspired by the success of the BiLSTM-based SCCWS model (Ma et al., 2018) and input-hint-based multilingual neural machine translation system (Johnson et al., 2017), He et al. (2019) proposed to build an input-hint-based MCCWS on top of the BiLSTM. They added two artificial tokens representing a criterion and put them at the beginning and the end of an input sentence. Such a simple idea advanced the SoTA performance on seven datasets simultaneously. Gong et al. (2019) proposed switch-LSTMs, which can dynamically route between multiple BiLSTMs to encode criterion-specific features when given different input hints. Their work set the SoTA limit that can be achieved via LSTM architecture.
|
| 54 |
+
|
| 55 |
+
After the remarkable effectiveness of pre-trained language models was found, MCCWS works
|
| 56 |
+
|
| 57 |
+
started to replace LSTM encoders with Transformer encoders (Vaswani et al., 2017). Huang et al. (2020a) used RoBERTa (Liu et al., 2019) to build an input-hint-based MCCWS model, which advanced SoTA performance. Huang et al. (2020b) shows that adding private structures on top of a large pre-trained model can push SoTA even further. Ke et al. (2021) pre-trained an input-hint-based MCCWS on BERT (Devlin et al., 2019) with meta-learning (Finn et al., 2017), but only after finetuning did they become the new SoTA on SCCWS models.
|
| 58 |
+
|
| 59 |
+
Ke et al. (2020) and Qiu et al. (2020) are the most similar to ours among many MCCWS works. We use a nearly identical input-hint-based model as in Qiu et al. (2020). However, like all the works mentioned before, they do not include a criterion classification objective, and therefore fail to provide a way to choose criteria automatically. Ke et al. (2020) is the only work using criterion classification objective, but we further simplified its model structure, which outperforms their models on average F1-score. We further proposed a novel criterion-denoising objective that helps choose criteria automatically. By trading off $0.07\%$ F1-score on average, we achieved the new SoTA on the OOV recall, which improved by a large margin compared to the previous SoTA $(1.61\%)$ .
|
| 60 |
+
|
| 61 |
+
In summary, previous research on MCCWS either did not provide a way to choose a criterion or always manually chose a criterion. In our work, we proposed a simple yet elegant way to make our MCCWS model automatically choose a suitable criterion for the given character sequence. Comparing our works to others, we find that (1) our model has the simplest structure and is the easiest to implement among other works; (2) we achieved MCCWS SoTA performance on several CWS datasets and on average F1-score over 10 datasets; (3) we improved SoTA OOV recall by a large margin.
|
| 62 |
+
|
| 63 |
+
# 3 MCCWS
|
| 64 |
+
|
| 65 |
+
In this section, we describe the detail of our methodology. We first give a formal definition of inputhint-based MCCWS (Section 3.1). Then we introduce our MCCWS model (Section 3.2). Finally, we formally define our criterion-denoising objective and describe how to jointly train our MCCWS on top of the proposed denoising objective (Section 3.3).
|
| 66 |
+
|
| 67 |
+
# 3.1 Problem Definition
|
| 68 |
+
|
| 69 |
+
Let $x$ be a character sequence. Denote the $i$ -th character of sequence $x$ as $x_{i}$ , and the $i$ -th output corresponds to $x$ as $y_{i}$ . Each $y_{i}$ belongs to a tagset $\mathcal{T} = \{\mathrm{B},\mathrm{M},\mathrm{E},\mathrm{S}\}$ where $\mathrm{B},\mathrm{M},\mathrm{E}$ represent the beginning, the middle, and the end of a word, and $\mathrm{S}$ represents a word with a single character. When receiving a character sequence $x$ , a SCCWS model will pass $x$ to its encoder (with parameter $\theta_{\mathrm{enc}}$ ) to generate the contextualized representation of $x$ , then feed the encoder output to its decoder (with parameter $\theta_{\mathrm{dec}}$ ) to generate prediction $y$ based on $x$ , following the constraint of the tagset $\mathcal{T}$ (see Figure 1(a)). Typically, a decoder such as the conditional random field (CRF) (Lafferty et al., 2001) will search through all possible combinations and return the combination with the highest probability:
|
| 70 |
+
|
| 71 |
+
$$
|
| 72 |
+
y ^ {*} = \underset {y \in \mathcal {T} ^ {| x |}} {\arg \max } \Pr (y \mid x; \theta_ {\mathrm {e n c}}, \theta_ {\mathrm {d e c}}), \tag {1}
|
| 73 |
+
$$
|
| 74 |
+
|
| 75 |
+
where $|x|$ denotes the number of characters of $x$ . The goal of a SCCWS model with parameters $\theta_{\mathrm{enc}}$ and $\theta_{\mathrm{dec}}$ is to maximize the probability of $y$ given $x$ over all pairs of $(x,y)$ in a CWS dataset $\mathcal{D}$ . One can achieve this by minimizing the negative log-likelihood $\mathcal{L}$ over dataset $\mathcal{D}$ :
|
| 76 |
+
|
| 77 |
+
$$
|
| 78 |
+
\begin{array}{l} \mathcal {L} (\mathcal {D}, \theta_ {\mathrm {e n c}}, \theta_ {\mathrm {d e c}}) \\ = \min - \sum_ {(x, y) \in \mathcal {D}} \log \Pr (y | x; \theta_ {\mathrm {e n c}}; \theta_ {\mathrm {d e c}}). \tag {2} \\ \end{array}
|
| 79 |
+
$$
|
| 80 |
+
|
| 81 |
+
Now suppose there are $K$ different CWS datasets $\{\mathcal{D}^k\}_{k = 1}^K$ . When receiving a character sequence $x$ from the $k$ -th dataset $\mathcal{D}^k$ , an input-hint-based MCCWS model will combine $x$ with the $k$ -th criterion token $[\mathbf{k}]$ to form a new sequence (see Figure 1(c)). The new sequence is then processed as in Equation (1). Therefore, we can rewrite Equation (2) to define the minimization objective of an input-hint-based MCCWS model with parameters $\theta_{\mathrm{enc}}$ and $\theta_{\mathrm{dec}}$ :
|
| 82 |
+
|
| 83 |
+
$$
|
| 84 |
+
\begin{array}{l} \mathcal {L} \left(\left\{\mathcal {D} \right\} _ {k = 1} ^ {K}, \theta_ {\mathrm {e n c}}, \theta_ {\mathrm {d e c}}\right) \\ = \min - \sum_ {k = 1} ^ {K} \sum_ {(x, y) \in \mathcal {D} ^ {k}} \log \Pr (y | x, [ k ]; \theta_ {\text {e n c}}; \theta_ {\text {d e c}}). \tag {3} \\ \end{array}
|
| 85 |
+
$$
|
| 86 |
+
|
| 87 |
+
Observe that the negative log-likelihood of $y$ is conditioned on both $x$ and $[\mathbf{k}]$ , and the minimization is performed on all $K$ datasets simultaneously instead of a single dataset.
|
| 88 |
+
|
| 89 |
+
# 3.2 Model Definition
|
| 90 |
+
|
| 91 |
+
Input Format. For each dataset $\mathcal{D}^k$ and each character sequence $x\in \mathcal{D}^k$ , let
|
| 92 |
+
|
| 93 |
+
$$
|
| 94 |
+
\mathbf {x} = \left[ \left[ \mathrm {C L S} \right]; [ \mathrm {k} ]; x; [ \mathrm {S E P} ] \right] \tag {4}
|
| 95 |
+
$$
|
| 96 |
+
|
| 97 |
+
be the new sequence formed by concatenating the [CLS] token, the $k$ -th criterion token $[\mathbf{k}]$ , character sequence $x$ , and the [SEP] token. $\mathbf{x}$ is treated as a sequence with $3 + |x|$ characters and fed into our MCCWS encoder.
|
| 98 |
+
|
| 99 |
+
Encoder. We used a pre-trained Chinese BERT as our encoder, and we denote the output of BERT as h:
|
| 100 |
+
|
| 101 |
+
$$
|
| 102 |
+
\mathbf {h} = \operatorname {B E R T} (\mathbf {x}; \theta_ {\mathrm {e n c}}) \in \mathbb {R} ^ {(3 + | x |) \times d _ {\mathrm {m o d e l}}}, \tag {5}
|
| 103 |
+
$$
|
| 104 |
+
|
| 105 |
+
where $d_{\mathrm{model}}$ is the hidden dimension of BERT. Devlin et al. (2019) includes all details of BERT. Both [CLS] and [SEP] tokens are only used to follow the BERT input format with no further computations done on both tokens. We note that we neither use any private structures nor handcrafted features. Thus, our encoder architecture can be considered as the simplest among other MCCWS works.
|
| 106 |
+
|
| 107 |
+
Decoder. To keep our model simple, we choose a greedy decoding algorithm over a non-greedy one. We use one linear layer followed by a softmax normalization as our decoder. The output of BERT encoder $\mathbf{h}$ , with starting index 3, is fed directly into our decoder:
|
| 108 |
+
|
| 109 |
+
$$
|
| 110 |
+
\begin{array}{l} \mathbf {y} _ {i - 2} = \operatorname {s o f t m a x} \left(W ^ {h} \cdot \mathbf {h} _ {i} + b ^ {h}\right) \in \mathbb {R} ^ {4} \\ \text {f o r a l l} i \in \{3, \dots , | x | + 2 \}. \tag {6} \\ \end{array}
|
| 111 |
+
$$
|
| 112 |
+
|
| 113 |
+
$W^{h}\in \mathbb{R}^{4\times d_{\mathrm{model}}}$ and $b^{h}\in \mathbb{R}^{4}$ are trainable parameters, and 4 is the size of tagset $\mathcal{T}$ . Our decoder will generate a sequence of probability vectors $\mathbf{y} = (\mathbf{y}_1,\dots ,\mathbf{y}_{|x|})\in \mathbb{R}^{|x|\times 4}$ . Since we use greedy decoding, we optimize our input-hint-based MCCWS model with cross-entropy loss instead of negative log-likelihood. So we change Equation (3) as follows:
|
| 114 |
+
|
| 115 |
+
$$
|
| 116 |
+
\begin{array}{l} \mathcal {L} (\{\mathcal {D} \} _ {k = 1} ^ {K}, \theta_ {\mathrm {e n c}}, \theta_ {\mathrm {d e c}}) \\ = \min - \sum_ {k = 1} ^ {K} \sum_ {(x, y) \in \mathcal {D} ^ {k}} \sum_ {i = 1} ^ {| x |} \mathbf {1} _ {y _ {i}} \odot \log \mathbf {y} _ {i}, \tag {7} \\ \end{array}
|
| 117 |
+
$$
|
| 118 |
+
|
| 119 |
+
where $\mathbf{1}_{y_i}$ denotes the one-hot encoding corresponding to $y_{i},\odot$ denotes the Hadamard product, and $\log \mathbf{y}_i$ denotes performing log operation on probability vector $\mathbf{y}_i$ in an element-wise fashion.
|
| 120 |
+
|
| 121 |
+
Criterion Classification To make our model remember the meaning of criterion hint $[\mathbf{k}]$ during the forward pass, we introduce a criterion classification task. We let our model predict which criterion hint it received. So we pick $\mathbf{h}_2$ , the output of BERT that corresponds to the criterion token $[\mathbf{k}]$ , and feed it into a criterion classifier which consists of one linear layer (different from our decoder) following a softmax normalization:
|
| 122 |
+
|
| 123 |
+
$$
|
| 124 |
+
\mathbf {c} = \operatorname {s o f t m a x} \left(W ^ {c} \cdot \mathbf {h} _ {2} + b ^ {c}\right) \in \mathbb {R} ^ {K}. \tag {8}
|
| 125 |
+
$$
|
| 126 |
+
|
| 127 |
+
Both $W^{c} \in \mathbb{R}^{K \times d_{\mathrm{model}}}$ and $b^{c} \in \mathbb{R}^{K}$ are trainable parameters. Our criterion classifier is set to minimize cross-entropy loss, just like Equation (7):
|
| 128 |
+
|
| 129 |
+
$$
|
| 130 |
+
\begin{array}{l} \mathcal {L} _ {\mathbf {c}} (\{\mathcal {D} \} _ {k = 1} ^ {K}, \theta_ {\mathrm {e n c}}, \theta_ {\mathrm {d e c}}) \\ = \min - \sum_ {k = 1} ^ {K} \sum_ {(x, y) \in \mathcal {D} ^ {k}} \mathbf {1} _ {[ k ]} \odot \log \mathbf {c}, \tag {9} \\ \end{array}
|
| 131 |
+
$$
|
| 132 |
+
|
| 133 |
+
where $\mathbf{1}_{[\mathbf{k}]}$ denotes the one-hot encoding that corresponds to $[\mathbf{k}]$ and $\log \mathbf{c}$ denotes the element-wise log operation on the probability vector $\mathbf{c}$ .
|
| 134 |
+
|
| 135 |
+
Total Loss Combining Equations (7) and (9), we get our final loss $\mathcal{L}_{\mathrm{final}}$ :
|
| 136 |
+
|
| 137 |
+
$$
|
| 138 |
+
\begin{array}{l} \mathcal {L} _ {\text {f i n a l}} \left(\left\{\mathcal {D} \right\} _ {k = 1} ^ {K}, \theta_ {\text {e n c}}, \theta_ {\text {d e c}}\right) \\ = \mathcal {L} (\{\mathcal {D} \} _ {k = 1} ^ {K}, \theta_ {\mathrm {e n c}}, \theta_ {\mathrm {d e c}}) \\ + \mathcal {L} _ {\mathbf {c}} \left(\left\{\mathcal {D} \right\} _ {k = 1} ^ {K}, \theta_ {\text {e n c}}, \theta_ {\text {d e c}}\right). \tag {10} \\ \end{array}
|
| 139 |
+
$$
|
| 140 |
+
|
| 141 |
+
We jointly train both objectives on our input-hint-based MCCWS model. Surprisingly, this joint objective gives us SoTA performance on several datasets.
|
| 142 |
+
|
| 143 |
+
# 3.3 Criterion Denoising
|
| 144 |
+
|
| 145 |
+
To avoid manually giving criterion tokens, we design a criterion-denoising objective to make our model choose the suitable criterion for each input. We define a token [UNC], which stands for "unknown criterion," and we randomly replace each pairing criterion $[\mathbf{k}]$ with [UNC]. In this situation, the goal of our criterion classifier (see Equation (8)) is to find the best fitting criterion for the given input $x$ . So Equation (9) becomes a denoising objective, in a similar way to the masked language model
|
| 146 |
+
|
| 147 |
+
objective used in BERT. After training with [UNC], the model can choose a suitable criterion for $x$ and perform CWS simultaneously, all in just a single forward pass. We show that such an auto mechanism does not harm the performance, making our model effective and practical.
|
| 148 |
+
|
| 149 |
+
# 4 Experiments
|
| 150 |
+
|
| 151 |
+
# 4.1 Datasets
|
| 152 |
+
|
| 153 |
+
We perform experiments on 10 CWS datasets (this means $K = 10$ ). Four datasets are from the SIGHAN2005 bakeoff (Emerson, 2005), including AS, CITYU, PKU, and MSRA; SXU is from the SIGHAN2008 bakeoff (Jin and Chen, 2008); the rest are $\mathrm{CNC}^2$ , CTB6 (Xue et al., 2005), UD (Zeman et al., 2018), WTB (Wang et al., 2014) and ZX (Zhang et al., 2014). Following Emerson (2005), we report the F1-score and OOV recall.
|
| 154 |
+
|
| 155 |
+
Our preprocessing mainly follows the works of He et al. (2019) and Chen et al. (2017), as done by others. We first convert all full-width characters into half-width. Then, we replace different consecutive digits with one token (we do the same for alphabets). Unlike others who set the maximum sentence length to 128 or lower to speed up the training process, we decide to utilize the full computing power of BERT and include as many characters in the same context as possible. So we set the maximum sentence length to 512. For sentences longer than 512, we try to find the nearest punctuation as our delimiter; otherwise, we split on the 512th character. The statistics for all datasets can be found in Appendix A.
|
| 156 |
+
|
| 157 |
+
# 4.2 Hyperparameters
|
| 158 |
+
|
| 159 |
+
We use PyTorch (Paszke et al., 2019) to implement our model. We fine-tune BERT with AdamW (Loshchilov and Hutter, 2019) on the pre-trained checkpoint bert-base-chinese provided by huggingface (Wolf et al., 2020) (this means $d_{\mathrm{model}} = 768$ and the number of parameters is around 110M). Moving average coefficients $(\beta_{1},\beta_{2})$ of AdamW are set to $(0.9,0.999)$ . The learning rate is set to $2\times 10^{-5}$ , and the weight decay coefficient is set to 0.01. We schedule the learning rate with linear warmup and linear decay. The warmup ratio is set to 0.1, and the total training step is set to 170000. Dropout (Srivastava et al., 2014) is applied with a probability of 0.1. We set the batch size to 32, and use gradient accumulation
|
| 160 |
+
|
| 161 |
+
with two steps (this is almost equivalent to setting the batch size to 64). We use label smoothing only on the decoder but not on the criterion classifier, and we set the smoothing value to 0.1. We pick the checkpoint with the highest F1 on the development set to calculate test set F1. For each experiment reported later, we ran each over 5 random seeds and reported only the best result. The results of all trials are listed in Appendix A. All experiments were run on a single Intel Xeon Silver 4216 CPU and an Nvidia RTX 3090 GPU.
|
| 162 |
+
|
| 163 |
+
# 4.3 Main Results
|
| 164 |
+
|
| 165 |
+
SoTA F1-score. Table 2 shows our results on F1 over 10 CWS datasets. Our MCCWS model (denoted as "Ours") achieves SoTA results on 5 out of 10 datasets. Since not all works performed experiments on all the same 10 datasets, we also report average results on the most common 4 (denoted as Avg.4) and 6 (denoted as Avg.6) datasets. Results show that our model is ranked 2nd under Avg.4 and Avg.6, which is only $0.14\%$ and $0.05\%$ less than the best-performing model respectively. We note that Huang et al. (2020b) used a private-structure-based MCCWS with CRF decoder, therefore, has way more parameters than our proposed model. Nevertheless, our model achieves the SoTA performance on average over 10 datasets (denoted as Avg.10). Therefore, despite the simplicity, our model still performs well against strong baselines.
|
| 166 |
+
|
| 167 |
+
Noisy but near SoTA. In Section 3.3, we proposed a criterion-denoising objective. We randomly select $10\%$ of the criterion tokens for each minibatch and replace them with [UNC]. Table 2 shows the performance of our criterion-denoising MCCWS model (denoted as Ours+10%[UNC]). We see that the denoising version of our model beats the previous SoTA on Avg.10 and even achieves the new SoTA on 5 datasets. This shows that our criterion-denoising objective does not hinder the performance, but helps our model advance to near SoTA results.
|
| 168 |
+
|
| 169 |
+
SoTA OOV Recall. Table 3 shows our results on OOV recall over 10 CWS datasets. Our models achieve SoTA results on 9 out of 10 datasets with or without criterion-denoising objective. CWS task is challenging when the word boundary is ambiguous, which can only be eased by giving enough context. Thus, we attribute the remarkable OOV recall improvement to our preprocessing step, for which we set the maximum input length to 512, giving our
|
| 170 |
+
|
| 171 |
+
<table><tr><td>MCCWS Models</td><td>AS</td><td>CITYU</td><td>CNC</td><td>CTB6</td><td>MSRA</td><td>PKU</td><td>SXU</td><td>UD</td><td>WTB</td><td>ZX</td><td>Avg.4</td><td>Avg.6</td><td>Avg.10</td></tr><tr><td>Model-I+ADVa</td><td>94.64</td><td>95.55</td><td>-</td><td>96.18</td><td>96.04</td><td>94.32</td><td>96.04</td><td>-</td><td>-</td><td>-</td><td>95.14</td><td>95.46</td><td>-</td></tr><tr><td>BiLSTM+CRF-4b</td><td>95.40</td><td>96.20</td><td>-</td><td>-</td><td>97.40</td><td>95.90</td><td>-</td><td>-</td><td>-</td><td>-</td><td>96.26</td><td>-</td><td>-</td></tr><tr><td>BiLSTM+CRF-8b</td><td>95.47</td><td>95.60</td><td>-</td><td>95.84</td><td>97.35</td><td>95.78</td><td>96.49</td><td>-</td><td>-</td><td>-</td><td>96.05</td><td>96.09</td><td>-</td></tr><tr><td>Switch-LSTMc</td><td>95.22</td><td>96.22</td><td>-</td><td>97.62</td><td>97.78</td><td>96.15</td><td>97.25</td><td>-</td><td>-</td><td>-</td><td>96.34</td><td>96.71</td><td>-</td></tr><tr><td>RoBERTa+softmaxd</td><td>-</td><td>-</td><td>97.19</td><td>97.56</td><td>98.29</td><td>96.85</td><td>97.56</td><td>97.69</td><td>-</td><td>96.46</td><td>-</td><td>-</td><td>-</td></tr><tr><td>BERT+CRFe</td><td>97.00</td><td>97.80</td><td>97.30</td><td>97.80</td><td>98.50</td><td>97.30</td><td>97.50</td><td>97.80</td><td>93.20</td><td>97.10</td><td>97.65</td><td>97.65</td><td>97.13</td></tr><tr><td>Transformer+CRFf</td><td>96.44</td><td>96.91</td><td>-</td><td>96.99</td><td>98.05</td><td>96.41</td><td>97.61</td><td>-</td><td>-</td><td>-</td><td>96.95</td><td>97.07</td><td>-</td></tr><tr><td>Unified BiLSTMg</td><td>95.47</td><td>95.60</td><td>-</td><td>95.84</td><td>97.35</td><td>95.78</td><td>96.49</td><td>-</td><td>-</td><td>-</td><td>96.05</td><td>96.09</td><td>-</td></tr><tr><td>Unified 
BERTg</td><td>96.90</td><td>97.07</td><td>-</td><td>97.20</td><td>98.45</td><td>96.89</td><td>97.81</td><td>-</td><td>-</td><td>-</td><td>97.33</td><td>97.39</td><td>-</td></tr><tr><td>METASEGh</td><td>97.04</td><td>98.12</td><td>97.25</td><td>97.87</td><td>98.02</td><td>96.76</td><td>97.51</td><td>83.84</td><td>89.53</td><td>88.48</td><td>97.49</td><td>97.55</td><td>-</td></tr><tr><td>Ours</td><td>96.65</td><td>98.15</td><td>97.43</td><td>97.84</td><td>98.36</td><td>96.86</td><td>97.73</td><td>98.28</td><td>93.94</td><td>97.14</td><td>97.51</td><td>97.60</td><td>97.24</td></tr><tr><td>Ours+10%[UNC]</td><td>96.66</td><td>98.16</td><td>97.39</td><td>97.88</td><td>98.28</td><td>96.85</td><td>97.67</td><td>98.04</td><td>93.65</td><td>97.07</td><td>97.49</td><td>97.58</td><td>97.17</td></tr><tr><td>Ours+10%[UNC]+auto</td><td>96.63</td><td>97.26</td><td>96.92</td><td>96.87</td><td>95.35</td><td>95.35</td><td>92.94</td><td>97.94</td><td>92.45</td><td>96.29</td><td>96.15</td><td>95.73</td><td>95.80</td></tr></table>
|
| 172 |
+
|
| 173 |
+
Table 2: The F1-score (in percentage) on all 10 datasets. The F1-scores other than ours are directly recorded from their papers. Numbers in bold indicate the SoTA, and underlined numbers indicate the SoTA achieved by our MCCWS models. Avg.4: Average over AS, CITYU, MSRA, and PKU; Avg.6: Average over AS, CITYU, CTB6, MSRA, PKU, and SXU; Avg.10: Average over 10 datasets; a: (Chen et al., 2017); b: (He et al., 2019); c: (Gong et al., 2019); d: (Huang et al., 2020a); e: (Huang et al., 2020b); f: (Qiu et al., 2020); g: (Ke et al., 2020); h: (Ke et al., 2021); Ours: Our model without criterion-denoising objective; Ours+10%[UNC]: Our model with criterion-denoising objective and randomly replacing 10% of criterion tokens with [UNC]; Ours+10%[UNC]+auto: Same as Ours+10%[UNC] but using the [UNC] token to perform evaluation.
|
| 174 |
+
|
| 175 |
+
model enough context to identify unseen words. We will further discuss this result in Section 4.4. But with the help of our criterion-denoising objective, we see that OOV recall is boosted even higher, showing the effectiveness of our criterion-denoising objective.
|
| 176 |
+
|
| 177 |
+
Auto Mechanism In Section 3.3, we claimed that our criterion-denoising objective could be used for choosing criteria automatically. We do this by pairing each input sequence on the test set with [UNC] and performing the evaluation. Table 2 shows that most datasets maintain their performances almost on par with the original even when using [UNC], and the average F1-score remains competitive with other baselines. This suggests that some common knowledge is shared throughout the 10 heterogeneous datasets, and our model can learn and leverage this knowledge.
|
| 178 |
+
|
| 179 |
+
Efficiency Unfortunately, almost all recent works do not release their source code. So it might be unfair to perform a quantifiable comparison. However, we can still do a time-complexity analysis. Since recent MCCWS works, including ours, use the same encoder architecture (BERT-base or RoBERTa-base), comparing the time complexity between different decoding algorithms is fair. CRF takes $O(|x| \cdot |\mathcal{T}|^2)$ , where $|x|$ stands for sequence length, and $|\mathcal{T}|$ stands for the number of classes (which is 4 for BMES tagging). Almost all recent works use CRF as their decoding strategy, but we
|
| 180 |
+
|
| 181 |
+
use greedy decoding, which only takes $O(|x|\cdot |\mathcal{T}|)$. Thus, our MCCWS model has lower time complexity and is more efficient.
|
| 182 |
+
|
| 183 |
+
# 4.4 Ablation Study
|
| 184 |
+
|
| 185 |
+
Increase Criterion Denoising Rate. This section studies what happens when the criterion denoising rate increases. Figure 2 shows that both the average F1-score and the average OOV recall decrease as criterion noise increases. This is expected: as in the masked language modeling experiment of BERT, increasing the masking rate results in a fine-tuning performance drop. However, as shown in Figure 2, inference with [UNC] is only slightly affected by different denoising rates. This suggests that when using the criterion-denoising objective, our model learns to segment based on the most common patterns shared across datasets. Thus, our model is robust to diverse inputs, proving itself to be a "general CWS model" that shares knowledge across different CWS datasets.
|
| 186 |
+
|
| 187 |
+
Reduce Maximum Sentence Length. As shown in Table 3, our model's OOV recall outperformed others by a large margin. We suspect that it is due to our preprocessing step, which allows our model to take inputs up to 512 characters. Figure 3 shows that the longer a model's character sequence is allowed to take, the better the performance on the average F1-score and the average OOV recall. Performance on input length longer than 256 stays
|
| 188 |
+
|
| 189 |
+
<table><tr><td>MCCWS Models</td><td>AS</td><td>CITYU</td><td>CNC</td><td>CTB6</td><td>MSRA</td><td>PKU</td><td>SXU</td><td>UD</td><td>WTB</td><td>ZX</td><td>Avg.4</td><td>Avg.6</td><td>Avg.10</td></tr><tr><td>Model-II+ADVa</td><td>75.37</td><td>81.05</td><td>-</td><td>82.19</td><td>72.76</td><td>73.13</td><td>76.88</td><td>-</td><td>-</td><td>-</td><td>75.578</td><td>76.897</td><td>-</td></tr><tr><td>Switch-LSTMsb</td><td>77.33</td><td>73.58</td><td>-</td><td>83.89</td><td>64.20</td><td>69.88</td><td>78.69</td><td>-</td><td>-</td><td>-</td><td>71.248</td><td>74.595</td><td>-</td></tr><tr><td>RoBERTa+softmaxc</td><td>-</td><td>-</td><td>59.44</td><td>88.02</td><td>81.75</td><td>82.35</td><td>85.73</td><td>91.40</td><td>-</td><td>82.51</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Transformer+CRFd</td><td>76.39</td><td>86.91</td><td>-</td><td>87.00</td><td>78.92</td><td>78.91</td><td>85.08</td><td>-</td><td>-</td><td>-</td><td>80.283</td><td>82.202</td><td>-</td></tr><tr><td>Unified BERTe</td><td>79.26</td><td>87.27</td><td>-</td><td>87.77</td><td>83.35</td><td>79.71</td><td>86.05</td><td>-</td><td>-</td><td>-</td><td>82.398</td><td>83.902</td><td>-</td></tr><tr><td>METASEGf</td><td>80.89</td><td>90.66</td><td>61.90</td><td>89.21</td><td>83.03</td><td>80.90</td><td>85.98</td><td>93.59</td><td>85.00</td><td>87.33</td><td>83.870</td><td>85.112</td><td>83.849</td></tr><tr><td>Ours</td><td>79.07</td><td>91.61</td><td>66.15</td><td>91.40</td><td>88.82</td><td>82.87</td><td>87.27</td><td>93.75</td><td>85.63</td><td>87.20</td><td>85.593</td><td>86.840</td><td>85.377</td></tr><tr><td>Ours+10%[UNC]</td><td>79.26</td><td>92.09</td><td>66.82</td><td>91.60</td><td>88.41</td><td>83.31</td><td>87.15</td><td>93.07</td><td>85.32</td><td>87.60</td><td>85.768</td><td>86.970</td><td>85.463</td></tr><tr><td>Ours+10%[UNC]+auto</td><td>79.50</td><td>90.62</td><td>65.44</td><td>89.86</td><td>74.94</td><td>79.29</td><td>77.58</td><td>92.94</td><td>83.18</td><td>86.66</td><td>81.088</td><td>
81.965</td><td>82.001</td></tr></table>
|
| 190 |
+
|
| 191 |
+
Table 3: The OOV recall (in percentage) on all 10 CWS datasets. The OOV recalls other than ours are directly recorded from their papers. Numbers in bold indicate the SoTA, and underlined numbers indicate the SoTA achieved by our MCCWS models. Avg.4: Average over AS, CITYU, MSRA, and PKU; Avg.6: Average over AS, CITYU, CTB6, MSRA, PKU, and SXU; Avg.10: Average over 10 datasets; a: (Chen et al., 2017); b: (Gong et al., 2019); c: (Huang et al., 2020a); d: (Qiu et al., 2020); e: (Ke et al., 2020); f: (Ke et al., 2021); Ours: Our model without criterion-denoising objective; Ours+10%[UNC]: Our model with criterion-denoising objective and randomly replacing 10% of criterion tokens with [UNC]; Ours+10%[UNC]+auto: Same as Ours+10%[UNC] but using the [UNC] token to perform evaluation.
|
| 192 |
+
|
| 193 |
+

|
| 194 |
+
Figure 2: Left: Trade-off between denoising rate and the average F1-score. Right: Trade-off between denoising rate and the average OOV recall. use [k]: Use criterion-specific token [k] to perform inference; use [UNC]: Use [UNC] to perform inference.
|
| 195 |
+
|
| 196 |
+

|
| 197 |
+
|
| 198 |
+

|
| 199 |
+
Figure 3: Left: Trade-off between the maximum sentence length constraint used in training and the average F1-score. Right: Trade-off between the maximum sentence length constraint used in training and the average OOV recall. test maximum length $= x$ : Use the same maximum length constraint to perform inference. test maximum length $= 512$ : Ignore the maximum length constraint and use up to 512 characters to perform inference.
|
| 200 |
+
|
| 201 |
+

|
| 202 |
+
|
| 203 |
+
<table><tr><td>Metric</td><td>MCCWS Models</td><td>AS</td><td>CITYU</td><td>CNC</td><td>CTB6</td><td>MSRA</td><td>PKU</td><td>SXU</td><td>UD</td><td>WTB</td><td>ZX</td><td>Avg.10</td></tr><tr><td rowspan="6">F1</td><td>Ours</td><td>96.65</td><td>98.15</td><td>97.43</td><td>97.84</td><td>98.36</td><td>96.86</td><td>97.73</td><td>98.28</td><td>93.94</td><td>97.14</td><td>97.24</td></tr><tr><td>-criterion classifier</td><td>-0.03</td><td>+0.01</td><td>-0.01</td><td>-0.02</td><td>+0.00</td><td>+0.04</td><td>-0.02</td><td>-0.11</td><td>-0.45</td><td>+0.03</td><td>-0.06</td></tr><tr><td>Ours+10%[UNC]</td><td>96.66</td><td>98.16</td><td>97.39</td><td>97.88</td><td>98.28</td><td>96.85</td><td>97.67</td><td>98.04</td><td>93.65</td><td>97.07</td><td>97.17</td></tr><tr><td>-criterion classifier</td><td>-0.06</td><td>+0.00</td><td>+0.02</td><td>-0.05</td><td>+0.09</td><td>+0.03</td><td>-0.01</td><td>+0.30</td><td>+0.20</td><td>+0.06</td><td>+0.05</td></tr><tr><td>Ours+10%[UNC]+auto</td><td>96.63</td><td>97.26</td><td>96.92</td><td>96.87</td><td>95.35</td><td>95.35</td><td>92.94</td><td>97.94</td><td>92.45</td><td>96.29</td><td>95.80</td></tr><tr><td>-criterion classifier</td><td>-0.03</td><td>+0.04</td><td>-0.04</td><td>-0.30</td><td>-0.23</td><td>-0.11</td><td>-0.01</td><td>+0.24</td><td>-0.22</td><td>-0.37</td><td>-0.10</td></tr><tr><td rowspan="6">OOV recall</td><td>Ours</td><td>79.07</td><td>91.61</td><td>66.15</td><td>91.40</td><td>88.82</td><td>82.87</td><td>87.27</td><td>93.75</td><td>85.63</td><td>87.20</td><td>85.377</td></tr><tr><td>-criterion classifier</td><td>-0.32</td><td>+0.44</td><td>+0.10</td><td>-0.26</td><td>-0.78</td><td>+1.03</td><td>+0.17</td><td>-0.61</td><td>-0.92</td><td>+0.90</td><td>-0.025</td></tr><tr><td>Ours+10%[UNC]</td><td>79.26</td><td>92.09</td><td>66.82</td><td>91.60</td><td>88.41</td><td>83.31</td><td>87.15</td><td>93.07</td><td>85.32</td><td>87.60</td><td>85.463</td></tr><tr><td>-criterion 
classifier</td><td>-0.04</td><td>-0.13</td><td>-0.52</td><td>-0.17</td><td>+0.52</td><td>+1.19</td><td>+0.69</td><td>+0.88</td><td>+1.22</td><td>-0.04</td><td>+0.360</td></tr><tr><td>Ours+10%[UNC]+auto</td><td>79.50</td><td>90.62</td><td>65.44</td><td>89.86</td><td>74.94</td><td>79.29</td><td>77.58</td><td>92.94</td><td>83.18</td><td>86.66</td><td>82.001</td></tr><tr><td>-criterion classifier</td><td>-0.61</td><td>+0.50</td><td>-0.31</td><td>-0.15</td><td>-0.21</td><td>+0.75</td><td>+1.13</td><td>+0.94</td><td>+0.61</td><td>+0.17</td><td>+0.282</td></tr></table>
|
| 204 |
+
|
| 205 |
+
mostly the same since only a few sequences have their length longer than 256 (the average sentence length on all 10 datasets is 37.09, see Appendix A). However, we found an easy fix for models trained on shorter sentences: That is, allow input sequence length up to 512. Despite not being trained on such a long sequence, we found that all models' performance increased after feeding longer input. This is consistent with the common sense that longer input reduces the chance of ambiguity and thus performs better on CWS.
|
| 206 |
+
|
| 207 |
+
Criterion Classifier When removing the criterion classifier, our average F1-score drops nearly $0.1\%$ (Table 4, row 1), which is the gap between our model and the previous SoTA. The F1-score drops even more when we use [UNC] to perform inference (Table 4, row 3). On the other hand, average OOV recall seems to increase when removing the criterion classifier (Table 4, rows 5-6). This suggests that without the criterion classifier, the ability to differentiate criteria is hindered (thus average F1 drops), and the MCCWS model starts to treat different datasets as a whole (thus average OOV recall improves). This shows the effectiveness of the criterion classification.
|
| 208 |
+
|
| 209 |
+
Case Study We provide examples to demonstrate our MCCWS model's capability of segmenting differently when given different criterion tokens. Table 5 shows that in some cases, one sentence can be segmented in at least five different ways, which proves that our model can perform CWS based on various criteria. Table 6 shows that in some other cases, most criteria agree with each other, which proves that our model can leverage the common knowledge shared across datasets. We leave more examples in Appendix A for interested readers.
|
| 210 |
+
|
| 211 |
+
Table 4: The impact on F1s/OOV recalls when removing the criterion classifier. -criterion classifier: The corresponding experiment from the previous row but removing the criterion classifier.
|
| 212 |
+
|
| 213 |
+
<table><tr><td>Original Sentence</td><td>也是言之有據</td></tr><tr><td>AS-gold</td><td>也-是-言-之-有-據</td></tr><tr><td>CITYU-gold</td><td>也是-言之有據</td></tr><tr><td>AS-infer</td><td>也-是-言-之-有-據</td></tr><tr><td>CITYU-infer</td><td>也是-言之有據</td></tr><tr><td>CNC-infer</td><td>也是-言之有據</td></tr><tr><td>CTB6-infer</td><td>也-是-言之有據</td></tr><tr><td>MSRA-infer</td><td>也是-言之有據</td></tr><tr><td>PKU-infer</td><td>也-是-言之有據</td></tr><tr><td>SXU-infer</td><td>也-是-言之有據</td></tr><tr><td>UD-infer</td><td>也是-言-之有據</td></tr><tr><td>WTB-infer</td><td>也是-言之有據</td></tr><tr><td>ZX-infer</td><td>也-是-言-之-有據</td></tr><tr><td>[UNC]-infer</td><td>也是-言之有據</td></tr></table>
|
| 214 |
+
|
| 215 |
+
Table 5: Examples showcasing that one sentence can have multiple segmentation criteria, and our MCCWS model can deal with these linguistic divergences. We found five different ways to segment the same sentence "也是言之有據" (Claims are justified). $\mathcal{D}^k$ -gold: Ground truth segmentation labeled in dataset $\mathcal{D}^k$ . $\mathcal{D}^k$ -infer: Inference result of our MCCWS model with criterion token [k]. [UNC]-infer: Inference result of our MCCWS model with unknown criterion token [UNC]. The hyphen “-” denotes segmentation.
|
| 216 |
+
|
| 217 |
+
# 5 Conclusion
|
| 218 |
+
|
| 219 |
+
In this paper, we proposed a simple yet effective input-hint-based MCCWS model which achieves several SoTA results across 10 CWS datasets. We also proposed a novel criterion-denoising objective which makes our model capable of choosing criterion automatically for each character sequence. Experiment results show that our novel denoising objective does not suffer dramatic performance loss but helps our MCCWS model retain near SoTA performance and even outperform previous work on
|
| 220 |
+
|
| 221 |
+
<table><tr><td>Original Sentence</td><td>江泽民总书记</td></tr><tr><td>MSRA-gold</td><td>江泽民-总书记</td></tr><tr><td>PKU-gold</td><td>江-泽民-总书记</td></tr><tr><td>AS-infer</td><td>江泽民-总书记</td></tr><tr><td>CITYU-infer</td><td>江泽民-总书记</td></tr><tr><td>CNC-infer</td><td>江泽民-总书记</td></tr><tr><td>CTB6-infer</td><td>江泽民-总书记</td></tr><tr><td>MSRA-infer</td><td>江泽民-总书记</td></tr><tr><td>PKU-infer</td><td>江-泽民-总书记</td></tr><tr><td>SXU-infer</td><td>江泽民-总书记</td></tr><tr><td>UD-infer</td><td>江-泽民-总书记</td></tr><tr><td>WTB-infer</td><td>江泽民-总书记</td></tr><tr><td>ZX-infer</td><td>江泽民-总书记</td></tr><tr><td>[UNC]-infer</td><td>江泽民-总书记</td></tr></table>
|
| 222 |
+
|
| 223 |
+
Table 6: Examples showcasing that our model can leverage shared common knowledge across datasets. We found three different ways to segment the same sentence "江泽民总书记" (General Secretary Jiang Zemin). We define symbols in the same way as in Table 5.
|
| 224 |
+
|
| 225 |
+
OOV recall by a large margin. Our model can serve as a simple and robust baseline for MCCWS work or as the starting point to further fine-tune into SC-CWS models. In the future, we will try to gather more CWS datasets and perform more extensive experiments on more datasets.
|
| 226 |
+
|
| 227 |
+
# Limitations
|
| 228 |
+
|
| 229 |
+
Unfortunately, we cannot access most SIGHAN2008 bakeoff datasets, which were proprietary but used by many previous works. This makes the comparison in Table 2 a little unfair. We argue that we replaced these non-accessible datasets with the ones publicly accessible (including UD, WTB, and ZX). We note that Huang et al. (2020b) faced the same limitation as us. Thus they also replaced datasets just as we did, which makes them the only directly comparable work to ours.
|
| 230 |
+
|
| 231 |
+
# Acknowledgement
|
| 232 |
+
|
| 233 |
+
This work was funded in part by the National Science and Technology Council, Taiwan, under grant MOST 111-2221-E-006-001 and in part by Google and Qualcomm through a Taiwan University Research Collaboration Project NAT-487842. This work cannot be done without the support of all of our labmates and families. So we would like to thank all of them. In particular, we thank Meng-Hsun Tsai, Daniel Tan, Runn Prasoprat, and Ching-Wen Yang for their help in reviewing the draft; we
|
| 234 |
+
|
| 235 |
+
thank Hsiu-Wen Li for his suggestion on changing different denoising rates; we thank Chia-Jen Yeh and Yi-Ting Li for their insightful discussion.
|
| 236 |
+
|
| 237 |
+
# References
|
| 238 |
+
|
| 239 |
+
Xinchi Chen, Zhan Shi, Xipeng Qiu, and Xuanjing Huang. 2017. Adversarial multi-criteria learning for Chinese word segmentation. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1193-1203, Vancouver, Canada. Association for Computational Linguistics.
|
| 240 |
+
|
| 241 |
+
Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.
|
| 242 |
+
|
| 243 |
+
Thomas Emerson. 2005. The second international Chinese word segmentation bakeoff. In Proceedings of the Fourth SIGHAN Workshop on Chinese Language Processing.
|
| 244 |
+
|
| 245 |
+
Chelsea Finn, Pieter Abbeel, and Sergey Levine. 2017. Model-agnostic meta-learning for fast adaptation of deep networks. In Proceedings of the 34th International Conference on Machine Learning, volume 70 of Proceedings of Machine Learning Research, pages 1126-1135. PMLR.
|
| 246 |
+
|
| 247 |
+
Jingjing Gong, Xinchi Chen, Tao Gui, and Xipeng Qiu. 2019. Switch-lstms for multi-criteria chinese word segmentation. Proceedings of the AAAI Conference on Artificial Intelligence, 33(01):6457-6464.
|
| 248 |
+
|
| 249 |
+
Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. 2014. Generative adversarial nets. In Advances in Neural Information Processing Systems, volume 27. Curran Associates, Inc.
|
| 250 |
+
|
| 251 |
+
Han He, Lei Wu, Hua Yan, Zhimin Gao, Yi Feng, and George Townsend. 2019. Effective neural solution for multi-criteria word segmentation. In Smart Intelligent Computing and Applications, pages 133-142, Singapore. Springer Singapore.
|
| 252 |
+
|
| 253 |
+
Kaiyu Huang, Degen Huang, Zhuang Liu, and Fengran Mo. 2020a. A joint multiple criteria model in transfer learning for cross-domain Chinese word segmentation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 3873-3882, Online. Association for Computational Linguistics.
|
| 254 |
+
|
| 255 |
+
Weipeng Huang, Xingyi Cheng, Kunlong Chen, Taifeng Wang, and Wei Chu. 2020b. Towards fast and accurate neural Chinese word segmentation with multicriteria learning. In Proceedings of the 28th International Conference on Computational Linguistics, pages 2062-2072, Barcelona, Spain (Online). International Committee on Computational Linguistics.
|
| 256 |
+
Guangjin Jin and Xiao Chen. 2008. The fourth international Chinese language processing bakeoff: Chinese word segmentation, named entity recognition and Chinese POS tagging. In Proceedings of the Sixth SIGHAN Workshop on Chinese Language Processing.
|
| 257 |
+
Melvin Johnson, Mike Schuster, Quoc V. Le, Maxim Krikun, Yonghui Wu, Zhifeng Chen, Nikhil Thorat, Fernanda Viégas, Martin Wattenberg, Greg Corrado, Macduff Hughes, and Jeffrey Dean. 2017. Google's multilingual neural machine translation system: Enabling zero-shot translation. Transactions of the Association for Computational Linguistics, 5:339-351.
|
| 258 |
+
Zhen Ke, Liang Shi, Erli Meng, Bin Wang, Xipeng Qiu, and Xuanjing Huang. 2020. Unified multi-criteria Chinese word segmentation with bert. arXiv preprint arXiv:2004.05808.
|
| 259 |
+
Zhen Ke, Liang Shi, Songtao Sun, Erli Meng, Bin Wang, and Xipeng Qiu. 2021. Pre-training with meta learning for Chinese word segmentation. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 5514-5523, Online. Association for Computational Linguistics.
|
| 260 |
+
John D. Lafferty, Andrew McCallum, and Fernando C. N. Pereira. 2001. Conditional random fields: Probabilistic models for segmenting and labeling sequence data. In Proceedings of the Eighteenth International Conference on Machine Learning, ICML '01, page 282-289, San Francisco, CA, USA. Morgan Kaufmann Publishers Inc.
|
| 261 |
+
Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining approach.
|
| 262 |
+
Ilya Loshchilov and Frank Hutter. 2019. Decoupled weight decay regularization. In International Conference on Learning Representations.
|
| 263 |
+
Ji Ma, Kuzman Ganchev, and David Weiss. 2018. State-of-the-art Chinese word segmentation with BiLSTMs. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 4902-4908, Brussels, Belgium. Association for Computational Linguistics.
|
| 264 |
+
Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward
|
| 265 |
+
|
| 266 |
+
Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. 2019. Pytorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc.
|
| 267 |
+
Xipeng Qiu, Hengzhi Pei, Hang Yan, and Xuanjing Huang. 2020. A concise model for multi-criteria Chinese word segmentation with transformer encoder. In *Findings of the Association for Computational Linguistics: EMNLP* 2020, pages 2887-2897, Online. Association for Computational Linguistics.
|
| 268 |
+
Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. 2014. Dropout: A simple way to prevent neural networks from overfitting. Journal of Machine Learning Research, 15(56):1929-1958.
|
| 269 |
+
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Processing Systems, volume 30. Curran Associates, Inc.
|
| 270 |
+
William Yang Wang, Lingpeng Kong, Kathryn Mazaitis, and William W. Cohen. 2014. Dependency parsing for Weibo: An efficient probabilistic logic programming approach. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1152-1158, Doha, Qatar. Association for Computational Linguistics.
|
| 271 |
+
Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Remi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander Rush. 2020. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Association for Computational Linguistics.
|
| 272 |
+
Naiwen Xue, Fei Xia, Fu-Dong Chiou, and Martha Palmer. 2005. The Penn Chinese Treebank: Phrase structure annotation of a large corpus. Natural Language Engineering, 11(2):207-238.
|
| 273 |
+
Nianwen Xue. 2003. Chinese word segmentation as character tagging. In International Journal of Computational Linguistics & Chinese Language Processing, Volume 8, Number 1, February 2003: Special Issue on Word Formation and Chinese Language Processing, pages 29-48.
|
| 274 |
+
Daniel Zeman, Jan Hajic, Martin Popel, Martin Potthast, Milan Straka, Filip Ginter, Joakim Nivre, and Slav Petrov. 2018. CoNLL 2018 shared task: Multilingual parsing from raw text to Universal Dependencies. In
|
| 275 |
+
|
| 276 |
+
Proceedings of the CoNLL 2018 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies, pages 1-21, Brussels, Belgium. Association for Computational Linguistics.
|
| 277 |
+
|
| 278 |
+
Meishan Zhang, Yue Zhang, Wanxiang Che, and Ting Liu. 2014. Type-supervised domain adaptation for joint segmentation and POS-tagging. In Proceedings of the 14th Conference of the European Chapter of the Association for Computational Linguistics, pages 588-597, Gothenburg, Sweden. Association for Computational Linguistics.
|
| 279 |
+
|
| 280 |
+
# A Appendix
|
| 281 |
+
|
| 282 |
+
We list the preprocessing statistics in Table 7. The datasets' descriptions and preprocessing steps can be found in Section 4.1. All datasets' licenses can be found in Table 8. Results over multiple trials can be found in Table 9 and Table 10. Tables 11, 12, 13, and 14 give more examples to demonstrate our input-hint-based MCCWS model's capability of segmenting Chinese words with multiple criteria.
|
| 283 |
+
|
| 284 |
+
<table><tr><td>Dataset</td><td>Split</td><td>#C</td><td>#S</td><td>#W</td><td>#UC</td><td>#UW</td><td>OOV%</td><td>Avg.SL</td></tr><tr><td rowspan="3">AS</td><td>train</td><td>7,453,690</td><td>638,058</td><td>4,898,372</td><td>5,957</td><td>124,512</td><td>0</td><td>11.68</td></tr><tr><td>dev</td><td>805,692</td><td>70,895</td><td>551,209</td><td>4,353</td><td>32,000</td><td>1.86</td><td>11.36</td></tr><tr><td>test</td><td>193,723</td><td>14,429</td><td>122,610</td><td>3,579</td><td>18,093</td><td>3.73</td><td>13.43</td></tr><tr><td rowspan="3">CITYU</td><td>train</td><td>2,132,370</td><td>47,718</td><td>1,317,626</td><td>4,799</td><td>60,650</td><td>0</td><td>44.69</td></tr><tr><td>dev</td><td>220,243</td><td>5,301</td><td>138,004</td><td>3,234</td><td>16,372</td><td>3.79</td><td>41.55</td></tr><tr><td>test</td><td>66,353</td><td>1,492</td><td>40,936</td><td>2,643</td><td>8,633</td><td>7.38</td><td>44.47</td></tr><tr><td rowspan="3">CNC</td><td>train</td><td>8,908,376</td><td>207,001</td><td>5,841,321</td><td>6,643</td><td>113,223</td><td>0</td><td>43.04</td></tr><tr><td>dev</td><td>1,109,292</td><td>25,875</td><td>727,783</td><td>5,109</td><td>47,773</td><td>0.76</td><td>42.87</td></tr><tr><td>test</td><td>1,107,772</td><td>25,876</td><td>726,038</td><td>5,154</td><td>47,268</td><td>0.75</td><td>42.81</td></tr><tr><td rowspan="3">CTB6</td><td>train</td><td>1,108,461</td><td>24,416</td><td>678,811</td><td>4,201</td><td>42,086</td><td>0</td><td>45.40</td></tr><tr><td>dev</td><td>82,765</td><td>1,904</td><td>51,229</td><td>2,491</td><td>8,639</td><td>4.89</td><td>43.47</td></tr><tr><td>test</td><td>86,157</td><td>1,975</td><td>52,861</td><td>2,538</td><td>8,747</td><td>5.17</td><td>43.62</td></tr><tr><td 
rowspan="3">MSRA</td><td>train</td><td>3,615,524</td><td>78,227</td><td>2,144,776</td><td>5,023</td><td>71,399</td><td>0</td><td>46.22</td></tr><tr><td>dev</td><td>363,425</td><td>8,691</td><td>223,615</td><td>3,676</td><td>22,515</td><td>2.57</td><td>41.82</td></tr><tr><td>test</td><td>180,988</td><td>3,985</td><td>106,873</td><td>2,805</td><td>11,858</td><td>2.12</td><td>45.42</td></tr><tr><td rowspan="3">PKU</td><td>train</td><td>1,616,528</td><td>17,255</td><td>1,004,155</td><td>4,569</td><td>48,758</td><td>0</td><td>93.68</td></tr><tr><td>dev</td><td>170,803</td><td>1,917</td><td>105,792</td><td>3,019</td><td>13,613</td><td>3.15</td><td>89.10</td></tr><tr><td>test</td><td>168,992</td><td>1,949</td><td>104,372</td><td>2,881</td><td>12,456</td><td>3.31</td><td>86.71</td></tr><tr><td rowspan="3">SXU</td><td>train</td><td>744,162</td><td>15,407</td><td>474,758</td><td>4,026</td><td>28,207</td><td>0</td><td>48.30</td></tr><tr><td>dev</td><td>85,470</td><td>1,711</td><td>53,480</td><td>2,206</td><td>6,460</td><td>6.23</td><td>49.95</td></tr><tr><td>test</td><td>179,688</td><td>3,654</td><td>113,527</td><td>2,776</td><td>11,600</td><td>4.93</td><td>49.18</td></tr><tr><td rowspan="3">UD</td><td>train</td><td>147,295</td><td>3,997</td><td>98,608</td><td>3,390</td><td>15,930</td><td>0</td><td>36.85</td></tr><tr><td>dev</td><td>19,027</td><td>500</td><td>12,663</td><td>1,922</td><td>4,040</td><td>10.95</td><td>38.05</td></tr><tr><td>test</td><td>18,080</td><td>500</td><td>12,012</td><td>1,806</td><td>3,748</td><td>11.05</td><td>36.16</td></tr><tr><td rowspan="3">WTB</td><td>train</td><td>22,512</td><td>813</td><td>14,774</td><td>1,635</td><td>3,045</td><td>0</td><td>27.69</td></tr><tr><td>dev</td><td>2,875</td><td>95</td><td>1,843</td><td>770</td><td>837</td><td>18.39</td><td>30.26</td></tr><tr><td>test</td><td>2,838</td><td>92</td><td>1,860</td><td>733</td><td>731</td><td>15.05</td><td>30.85</td></tr><tr><td 
rowspan="3">ZX</td><td>train</td><td>96,647</td><td>2,373</td><td>67,648</td><td>2,289</td><td>6,770</td><td>0</td><td>40.73</td></tr><tr><td>dev</td><td>28,309</td><td>788</td><td>20,393</td><td>1,651</td><td>3,184</td><td>7.85</td><td>35.93</td></tr><tr><td>test</td><td>47,992</td><td>1,394</td><td>34,355</td><td>1,787</td><td>4,126</td><td>6.45</td><td>34.43</td></tr><tr><td rowspan="3">All</td><td>train</td><td>25,845,565</td><td>1,035,265</td><td>16,540,849</td><td>9,286</td><td>310,538</td><td>0</td><td>24.97</td></tr><tr><td>dev</td><td>2,887,901</td><td>117,677</td><td>1,886,011</td><td>7,134</td><td>95,398</td><td>1.30</td><td>24.54</td></tr><tr><td>test</td><td>2,052,583</td><td>55,346</td><td>1,315,444</td><td>6,789</td><td>77,145</td><td>1.21</td><td>37.09</td></tr></table>
|
| 285 |
+
|
| 286 |
+
Table 7: Dataset statistics (after preprocessing) for training, development, and test sets. #C: Number of characters. #S: Number of sentences. #W: Number of words. #UC: Number of unique characters. #UW: Number of unique words. OOV%: Out-of-vocabulary words rate. Avg.SL: Average sentence length. AS: Academia Sinica, Taiwan. CITYU: City University of Hong Kong, Hong Kong SAR. CNC: CNC corpus, China. CTB6: The Penn Chinese Treebank. MSRA: Microsoft Research, China. PKU: Peking University, China. SXU: Shanxi University, China. UD: Universal Dependencies. WTB: The Chinese Weibo Treebank. ZX: ZhuXian.
|
| 287 |
+
|
| 288 |
+
<table><tr><td>Dataset</td><td>Provider</td><td>License</td></tr><tr><td>AS</td><td>SIGHAN2005</td><td>Research Purpose</td></tr><tr><td>CITYU</td><td>SIGHAN2005</td><td>Research Purpose</td></tr><tr><td>CNC</td><td>CNCorpus</td><td>Research Purpose</td></tr><tr><td>CTB6</td><td>StanfordCoreNLP</td><td>Apache License</td></tr><tr><td>MSRA</td><td>SIGHAN2005</td><td>Research Purpose</td></tr><tr><td>PKU</td><td>SIGHAN2005</td><td>Research Purpose</td></tr><tr><td>SXU</td><td>Shan Xi University</td><td>Research Purpose</td></tr><tr><td>UD</td><td>UD Project</td><td>BY-NC-SA 4.0</td></tr><tr><td>WTB</td><td>Wang et al. (2014)</td><td>Research Purpose</td></tr><tr><td>ZX</td><td>Zhang et al. (2014)</td><td>Research Purpose</td></tr></table>
|
| 289 |
+
|
| 290 |
+
Table 8: All datasets' licenses.
|
| 291 |
+
|
| 292 |
+
<table><tr><td>Experiments</td><td>Seeds</td><td>AS</td><td>CITYU</td><td>CNC</td><td>CTB6</td><td>MSRA</td><td>PKU</td><td>SXU</td><td>UD</td><td>WTB</td><td>ZX</td><td>Avg.10</td></tr><tr><td rowspan="7">Ours</td><td>927</td><td>96.65</td><td>98.15</td><td>97.43</td><td>97.84</td><td>98.36</td><td>96.86</td><td>97.73</td><td>98.28</td><td>93.94</td><td>97.14</td><td>97.238</td></tr><tr><td>4332</td><td>96.66</td><td>98.10</td><td>97.44</td><td>97.96</td><td>98.47</td><td>96.95</td><td>97.70</td><td>98.19</td><td>93.69</td><td>97.00</td><td>97.216</td></tr><tr><td>6664</td><td>96.58</td><td>98.05</td><td>97.44</td><td>97.84</td><td>98.41</td><td>96.91</td><td>97.72</td><td>98.23</td><td>93.42</td><td>97.20</td><td>97.180</td></tr><tr><td>7155</td><td>96.73</td><td>98.02</td><td>97.45</td><td>97.91</td><td>98.37</td><td>96.90</td><td>97.79</td><td>98.30</td><td>93.56</td><td>97.03</td><td>97.206</td></tr><tr><td>8384</td><td>96.68</td><td>98.05</td><td>97.44</td><td>97.83</td><td>98.37</td><td>96.89</td><td>97.65</td><td>98.21</td><td>93.55</td><td>97.04</td><td>97.171</td></tr><tr><td>Avg.5</td><td>96.660</td><td>98.074</td><td>97.440</td><td>97.876</td><td>98.396</td><td>96.902</td><td>97.718</td><td>98.242</td><td>93.632</td><td>97.082</td><td>97.202</td></tr><tr><td>Std.5</td><td>0.049</td><td>0.046</td><td>0.006</td><td>0.051</td><td>0.041</td><td>0.029</td><td>0.045</td><td>0.042</td><td>0.176</td><td>0.075</td><td>0.0243</td></tr><tr><td 
rowspan="7">Ours+10%[UNC]</td><td>927</td><td>96.66</td><td>98.16</td><td>97.39</td><td>97.88</td><td>98.28</td><td>96.85</td><td>97.67</td><td>98.04</td><td>93.65</td><td>97.07</td><td>97.165</td></tr><tr><td>4332</td><td>96.65</td><td>97.99</td><td>97.37</td><td>97.90</td><td>98.26</td><td>96.88</td><td>97.63</td><td>97.93</td><td>93.32</td><td>97.04</td><td>97.097</td></tr><tr><td>6664</td><td>96.66</td><td>98.08</td><td>97.35</td><td>97.93</td><td>98.21</td><td>96.89</td><td>97.61</td><td>98.07</td><td>93.85</td><td>97.14</td><td>97.179</td></tr><tr><td>7155</td><td>96.77</td><td>98.00</td><td>97.36</td><td>97.93</td><td>98.27</td><td>96.83</td><td>97.64</td><td>98.11</td><td>93.54</td><td>97.03</td><td>97.148</td></tr><tr><td>8384</td><td>96.65</td><td>98.00</td><td>97.38</td><td>97.93</td><td>98.29</td><td>96.87</td><td>97.61</td><td>98.30</td><td>93.63</td><td>96.94</td><td>97.160</td></tr><tr><td>Avg.5</td><td>96.678</td><td>98.046</td><td>97.370</td><td>97.914</td><td>98.262</td><td>96.864</td><td>97.632</td><td>98.090</td><td>93.598</td><td>97.044</td><td>97.150</td></tr><tr><td>Std.5</td><td>0.046</td><td>0.066</td><td>0.014</td><td>0.021</td><td>0.028</td><td>0.022</td><td>0.022</td><td>0.121</td><td>0.172</td><td>0.065</td><td>0.0282</td></tr><tr><td 
rowspan="7">Ours+10%[UNC]+auto</td><td>927</td><td>96.63</td><td>97.26</td><td>96.92</td><td>96.87</td><td>95.35</td><td>95.35</td><td>92.94</td><td>97.94</td><td>92.45</td><td>96.29</td><td>95.800</td></tr><tr><td>4332</td><td>96.60</td><td>97.22</td><td>96.92</td><td>96.84</td><td>95.19</td><td>95.50</td><td>93.54</td><td>97.92</td><td>92.72</td><td>96.39</td><td>95.884</td></tr><tr><td>6664</td><td>96.64</td><td>97.30</td><td>97.01</td><td>96.89</td><td>92.78</td><td>95.08</td><td>93.43</td><td>97.98</td><td>92.26</td><td>96.05</td><td>95.542</td></tr><tr><td>7155</td><td>96.70</td><td>97.34</td><td>96.91</td><td>96.83</td><td>95.12</td><td>95.49</td><td>93.53</td><td>97.94</td><td>92.48</td><td>96.05</td><td>95.839</td></tr><tr><td>8384</td><td>96.64</td><td>97.17</td><td>96.86</td><td>96.88</td><td>95.52</td><td>95.44</td><td>93.24</td><td>98.06</td><td>92.48</td><td>96.23</td><td>95.852</td></tr><tr><td>Avg.5</td><td>96.642</td><td>97.258</td><td>96.924</td><td>96.862</td><td>94.792</td><td>95.372</td><td>93.336</td><td>97.968</td><td>92.478</td><td>96.202</td><td>95.783</td></tr><tr><td>Std.5</td><td>0.032</td><td>0.059</td><td>0.048</td><td>0.023</td><td>1.015</td><td>0.155</td><td>0.225</td><td>0.050</td><td>0.146</td><td>0.134</td><td>0.1236</td></tr></table>
|
| 293 |
+
|
| 294 |
+
Table 9: F1 results of 5 different trials. Experiment names are the same as in Table 2. Seed: Random seed set in an experiment. Avg.10: Average over 10 datasets. Avg.5: Average over 5 trials. Std.5: Standard deviation over 5 trials. We have $p < 0.05$ (precisely, $p = 0.0013$ ) when comparing the average Avg.10 over 5 trials of Ours to the previous SoTA (Avg.10 equals to 97.13, see Table 2) with t-test ( $\alpha = 0.01$ ). This means our MCCWS model is statistically significantly better than the previous SoTA.
|
| 295 |
+
|
| 296 |
+
<table><tr><td>Experiments</td><td>Seeds</td><td>AS</td><td>CITYU</td><td>CNC</td><td>CTB6</td><td>MSRA</td><td>PKU</td><td>SXU</td><td>UD</td><td>WTB</td><td>ZX</td><td>Avg.10</td></tr><tr><td rowspan="7">Ours</td><td>927</td><td>79.07</td><td>91.61</td><td>66.15</td><td>91.40</td><td>88.82</td><td>82.87</td><td>87.27</td><td>93.75</td><td>85.63</td><td>87.20</td><td>85.377</td></tr><tr><td>4332</td><td>79.52</td><td>91.77</td><td>66.05</td><td>91.78</td><td>88.34</td><td>83.80</td><td>87.29</td><td>93.68</td><td>85.63</td><td>87.74</td><td>85.560</td></tr><tr><td>6664</td><td>78.45</td><td>91.48</td><td>66.57</td><td>91.69</td><td>88.24</td><td>83.39</td><td>87.17</td><td>93.68</td><td>86.54</td><td>88.05</td><td>85.526</td></tr><tr><td>7155</td><td>80.52</td><td>91.16</td><td>66.17</td><td>91.86</td><td>88.34</td><td>83.23</td><td>87.55</td><td>93.41</td><td>85.63</td><td>87.56</td><td>85.543</td></tr><tr><td>8384</td><td>79.88</td><td>91.26</td><td>66.13</td><td>91.02</td><td>89.06</td><td>83.00</td><td>87.00</td><td>93.07</td><td>84.40</td><td>87.60</td><td>85.242</td></tr><tr><td>Avg.5</td><td>79.488</td><td>91.456</td><td>66.214</td><td>91.550</td><td>88.560</td><td>83.258</td><td>87.256</td><td>93.518</td><td>85.566</td><td>87.630</td><td>85.450</td></tr><tr><td>Std.5</td><td>0.703</td><td>0.223</td><td>0.183</td><td>0.307</td><td>0.321</td><td>0.325</td><td>0.179</td><td>0.252</td><td>0.681</td><td>0.275</td><td>0.1226</td></tr><tr><td 
rowspan="7">Ours+10%[UNC]</td><td>927</td><td>79.26</td><td>92.09</td><td>66.82</td><td>91.60</td><td>88.41</td><td>83.31</td><td>87.15</td><td>93.07</td><td>85.32</td><td>87.60</td><td>85.463</td></tr><tr><td>4332</td><td>79.07</td><td>91.03</td><td>65.96</td><td>91.40</td><td>87.73</td><td>83.39</td><td>86.76</td><td>93.07</td><td>83.49</td><td>87.78</td><td>84.968</td></tr><tr><td>6664</td><td>79.60</td><td>92.28</td><td>66.28</td><td>91.66</td><td>88.00</td><td>83.44</td><td>87.60</td><td>92.74</td><td>86.24</td><td>88.14</td><td>85.598</td></tr><tr><td>7155</td><td>80.63</td><td>91.48</td><td>65.71</td><td>91.80</td><td>88.62</td><td>83.67</td><td>87.41</td><td>93.07</td><td>85.63</td><td>87.83</td><td>85.585</td></tr><tr><td>8384</td><td>79.07</td><td>91.38</td><td>66.98</td><td>91.75</td><td>88.48</td><td>82.92</td><td>87.60</td><td>94.01</td><td>85.63</td><td>87.65</td><td>85.547</td></tr><tr><td>Avg.5</td><td>79.525</td><td>91.652</td><td>66.350</td><td>91.642</td><td>88.248</td><td>83.346</td><td>87.304</td><td>93.192</td><td>85.262</td><td>87.800</td><td>85.432</td></tr><tr><td>Std.5</td><td>0.585</td><td>0.464</td><td>0.487</td><td>0.139</td><td>0.331</td><td>0.244</td><td>0.318</td><td>0.429</td><td>0.935</td><td>0.189</td><td>0.2368</td></tr><tr><td 
rowspan="7">Ours+10%[UNC]+auto</td><td>927</td><td>79.50</td><td>90.62</td><td>65.44</td><td>89.86</td><td>74.94</td><td>79.29</td><td>77.58</td><td>92.94</td><td>83.18</td><td>86.66</td><td>82.001</td></tr><tr><td>4332</td><td>79.11</td><td>90.24</td><td>64.77</td><td>89.78</td><td>74.01</td><td>79.57</td><td>79.14</td><td>93.00</td><td>81.35</td><td>87.11</td><td>81.808</td></tr><tr><td>6664</td><td>80.12</td><td>91.26</td><td>65.64</td><td>89.83</td><td>64.24</td><td>78.28</td><td>80.57</td><td>93.07</td><td>83.49</td><td>85.94</td><td>81.244</td></tr><tr><td>7155</td><td>80.44</td><td>90.71</td><td>64.62</td><td>89.89</td><td>73.71</td><td>79.65</td><td>79.48</td><td>92.94</td><td>84.71</td><td>85.98</td><td>82.213</td></tr><tr><td>8384</td><td>79.67</td><td>90.20</td><td>66.07</td><td>90.10</td><td>76.79</td><td>79.03</td><td>78.57</td><td>93.07</td><td>84.71</td><td>87.29</td><td>82.550</td></tr><tr><td>Avg.5</td><td>79.768</td><td>90.606</td><td>65.308</td><td>89.892</td><td>72.738</td><td>79.164</td><td>79.068</td><td>93.004</td><td>83.487</td><td>86.596</td><td>81.963</td></tr><tr><td>Std.5</td><td>0.467</td><td>0.384</td><td>0.542</td><td>0.110</td><td>4.383</td><td>0.493</td><td>0.989</td><td>0.058</td><td>1.237</td><td>0.559</td><td>0.4358</td></tr></table>
|
| 297 |
+
|
| 298 |
+
Table 10: OOV recalls of 5 different trials. Experiment names are the same as in Table 3. Seed: Random seed set in an experiment. Avg.10: Average over 10 datasets. Avg.5: Average over 5 trials. Std.5: Standard deviation over 5 trials. We have $p < 0.05$ (precisely, $p = 0.0001$ ) when comparing the average Avg.10 over 5 trials of Ours to the previous SoTA (Avg.10 equals to 83.849, see Table 3) with t-test ( $\alpha = 0.01$ ). This means our MCCWS model is statistically significantly better than the previous SoTA.
|
| 299 |
+
|
| 300 |
+
<table><tr><td>Original Sentence</td><td>何樂而不為</td></tr><tr><td>AS-gold
|
| 301 |
+
CITYU-gold</td><td>何-樂-而-不-為
|
| 302 |
+
何樂而不為</td></tr><tr><td>AS-infer
|
| 303 |
+
CITYU-infer</td><td>何-樂-而-不-為</td></tr><tr><td>CNC-infer</td><td>何-樂-而-不-為</td></tr><tr><td>CTB6-infer</td><td>何-樂而-不為</td></tr><tr><td>MSRA-infer</td><td>何樂而不為</td></tr><tr><td>PKU-infer</td><td>何樂而不為</td></tr><tr><td>SXU-infer</td><td>何樂而不為</td></tr><tr><td>UD-infer</td><td>何-樂-而-不-為</td></tr><tr><td>WTB-infer</td><td>何樂而不為</td></tr><tr><td>ZX-infer</td><td>何-樂-而-不-為</td></tr><tr><td>[UNC]-infer</td><td>何樂而不為</td></tr></table>
|
| 304 |
+
|
| 305 |
+
Table 11: More examples showcase the capability of our input-hint-based MCCWS model. This example is the same one used in Table 1. We found three different ways to segment the same sentence "何樂而不為" (Why not do something?). We define symbols in the same way as in Table 5.
|
| 306 |
+
|
| 307 |
+
<table><tr><td>Original Sentence</td><td>四月二十六日</td></tr><tr><td>AS-gold</td><td>四月-二十六日</td></tr><tr><td>CITYU-gold</td><td>四月-二十六日</td></tr><tr><td>CNC-gold</td><td>四月-二十六日</td></tr><tr><td>MSRA-gold</td><td>四月二十六日</td></tr><tr><td>AS-infer</td><td>四月-二十六日</td></tr><tr><td>CITYU-infer</td><td>四月-二十六日</td></tr><tr><td>CNC-infer</td><td>四月-二十六日</td></tr><tr><td>CTB6-infer</td><td>四月-二十六日</td></tr><tr><td>MSRA-infer</td><td>四月二十六日</td></tr><tr><td>PKU-infer</td><td>四月-二十六日</td></tr><tr><td>SXU-infer</td><td>四月-二十六日</td></tr><tr><td>UD-infer</td><td>四月-二十六日</td></tr><tr><td>WTB-infer</td><td>四月-二十六日</td></tr><tr><td>ZX-infer</td><td>四月-二十六日</td></tr><tr><td>[UNC]-infer</td><td>四月-二十六日</td></tr></table>
|
| 308 |
+
|
| 309 |
+
Table 13: More examples showcase the capability of our input-hint-based MCCWS model. We found four different ways to segment the same sentence “四月二十六日” (April 26). We define symbols in the same way as in Table 5.
|
| 310 |
+
|
| 311 |
+
<table><tr><td>Original Sentence</td><td>一去不復返</td></tr><tr><td>AS-gold</td><td>一-去-不復-返</td></tr><tr><td>CITYU-gold</td><td>一去不復返</td></tr><tr><td>CNC-gold</td><td>一去不復返</td></tr><tr><td>MSRA-gold</td><td>一去不復返</td></tr><tr><td>PKU-gold</td><td>一去不復返</td></tr><tr><td>AS-infer</td><td>一-去-不復-返</td></tr><tr><td>CITYU-infer</td><td>一去不復返</td></tr><tr><td>CNC-infer</td><td>一去不復返</td></tr><tr><td>CTB6-infer</td><td>一-去-不復-返</td></tr><tr><td>MSRA-infer</td><td>一去不復返</td></tr><tr><td>PKU-infer</td><td>一去不復返</td></tr><tr><td>SXU-infer</td><td>一去不復返</td></tr><tr><td>UD-infer</td><td>一-去-不復-返</td></tr><tr><td>WTB-infer</td><td>一去不復返</td></tr><tr><td>ZX-infer</td><td>一-去-不-復-返</td></tr><tr><td>[UNC]-infer</td><td>一去不復返</td></tr></table>
|
| 312 |
+
|
| 313 |
+
Table 12: More examples showcase the capability of our input-hint-based MCCWS model. We found four different ways to segment the same sentence "一去不復返" (Gone, never to return). We define symbols in the same way as in Table 5.
|
| 314 |
+
|
| 315 |
+
<table><tr><td>Original Sentence</td><td>並不足以</td></tr><tr><td>AS-gold</td><td>並-不-足以</td></tr><tr><td>CITYU-gold</td><td>並-不足以</td></tr><tr><td>CNC-gold</td><td>並不-足以</td></tr><tr><td>AS-infer</td><td>並-不-足以</td></tr><tr><td>CITYU-infer</td><td>並-不足以</td></tr><tr><td>CNC-infer</td><td>並不-足以</td></tr><tr><td>CTB6-infer</td><td>並不-足以</td></tr><tr><td>MSRA-infer</td><td>並不-足以</td></tr><tr><td>PKU-infer</td><td>並-不足以</td></tr><tr><td>SXU-infer</td><td>並-不足以</td></tr><tr><td>UD-infer</td><td>並-不-足以</td></tr><tr><td>WTB-infer</td><td>並不-足以</td></tr><tr><td>ZX-infer</td><td>並-不-足以</td></tr><tr><td>[UNC]-infer</td><td>並-不-足以</td></tr></table>
|
| 316 |
+
|
| 317 |
+
Table 14: More examples showcase the capability of our input-hint-based MCCWS model. We found four different ways to segment the same sentence “並不足以” (Not enough). We define symbols in the same way as in Table 5.
|
| 318 |
+
|
| 319 |
+
# A For every submission:
|
| 320 |
+
|
| 321 |
+
A1. Did you describe the limitations of your work?
|
| 322 |
+
|
| 323 |
+
Left blank.
|
| 324 |
+
|
| 325 |
+
A2. Did you discuss any potential risks of your work?
|
| 326 |
+
|
| 327 |
+
Left blank.
|
| 328 |
+
|
| 329 |
+
A3. Do the abstract and introduction summarize the paper's main claims?
|
| 330 |
+
|
| 331 |
+
Left blank.
|
| 332 |
+
|
| 333 |
+
□ A4. Have you used AI writing assistants when working on this paper?
|
| 334 |
+
|
| 335 |
+
Left blank.
|
| 336 |
+
|
| 337 |
+
# B Did you use or create scientific artifacts?
|
| 338 |
+
|
| 339 |
+
Left blank.
|
| 340 |
+
|
| 341 |
+
B1. Did you cite the creators of artifacts you used?
|
| 342 |
+
|
| 343 |
+
Left blank.
|
| 344 |
+
|
| 345 |
+
B2. Did you discuss the license or terms for use and / or distribution of any artifacts?
|
| 346 |
+
|
| 347 |
+
Left blank.
|
| 348 |
+
|
| 349 |
+
B3. Did you discuss if your use of existing artifact(s) was consistent with their intended use, provided that it was specified? For the artifacts you create, do you specify intended use and whether that is compatible with the original access conditions (in particular, derivatives of data accessed for research purposes should not be used outside of research contexts)?
|
| 350 |
+
|
| 351 |
+
Left blank.
|
| 352 |
+
|
| 353 |
+
B4. Did you discuss the steps taken to check whether the data that was collected / used contains any information that names or uniquely identifies individual people or offensive content, and the steps taken to protect / anonymize it?
|
| 354 |
+
|
| 355 |
+
Left blank.
|
| 356 |
+
|
| 357 |
+
B5. Did you provide documentation of the artifacts, e.g., coverage of domains, languages, and linguistic phenomena, demographic groups represented, etc.?
|
| 358 |
+
|
| 359 |
+
Left blank.
|
| 360 |
+
|
| 361 |
+
B6. Did you report relevant statistics like the number of examples, details of train / test / dev splits, etc. for the data that you used / created? Even for commonly-used benchmark datasets, include the number of examples in train / validation / test splits, as these provide necessary context for a reader to understand experimental results. For example, small differences in accuracy on large test sets may be significant, while on small test sets they may not be.
|
| 362 |
+
|
| 363 |
+
Left blank.
|
| 364 |
+
|
| 365 |
+
# C Did you run computational experiments?
|
| 366 |
+
|
| 367 |
+
Left blank.
|
| 368 |
+
|
| 369 |
+
C1. Did you report the number of parameters in the models used, the total computational budget (e.g., GPU hours), and computing infrastructure used?
|
| 370 |
+
|
| 371 |
+
Left blank.
|
| 372 |
+
|
| 373 |
+
The Responsible NLP Checklist used at ACL 2023 is adopted from NAACL 2022, with the addition of a question on AI writing assistance.
|
| 374 |
+
|
| 375 |
+
C2. Did you discuss the experimental setup, including hyperparameter search and best-found hyperparameter values? Left blank.
|
| 376 |
+
C3. Did you report descriptive statistics about your results (e.g., error bars around results, summary statistics from sets of experiments), and is it transparent whether you are reporting the max, mean, etc. or just a single run? Left blank.
|
| 377 |
+
C4. If you used existing packages (e.g., for preprocessing, for normalization, or for evaluation), did you report the implementation, model, and parameter settings used (e.g., NLTK, Spacy, ROUGE, etc.)? Left blank.
|
| 378 |
+
|
| 379 |
+
# D Did you use human annotators (e.g., crowdworkers) or research with human participants?
|
| 380 |
+
|
| 381 |
+
Left blank.
|
| 382 |
+
|
| 383 |
+
D1. Did you report the full text of instructions given to participants, including e.g., screenshots, disclaimers of any risks to participants or annotators, etc.? Left blank.
|
| 384 |
+
D2. Did you report information about how you recruited (e.g., crowdsourcing platform, students) and paid participants, and discuss if such payment is adequate given the participants' demographic (e.g., country of residence)? Left blank.
|
| 385 |
+
D3. Did you discuss whether and how consent was obtained from people whose data you're using/curating? For example, if you collected data via crowdsourcing, did your instructions to crowdworkers explain how the data would be used? Left blank.
|
| 386 |
+
D4. Was the data collection protocol approved (or determined exempt) by an ethics review board? Left blank.
|
| 387 |
+
D5. Did you report the basic demographic and geographic characteristics of the annotator population that is the source of the data? Left blank.
|
advancingmulticriteriachinesewordsegmentationthroughcriterionclassificationanddenoising/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9ebb667e50fcc32181f0ba1e167e535f38a3286c2c3c5a608401562b9dae71ac
|
| 3 |
+
size 1729264
|
advancingmulticriteriachinesewordsegmentationthroughcriterionclassificationanddenoising/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0458d56ebb7f54a3f03c7ea7acdc2df1604a1a8e4ca1ce77a4a4fcb54467a7c0
|
| 3 |
+
size 445075
|
adynamicprogrammingalgorithmforspanbasednestednamedentityrecognitioninon2/0495ac5b-b465-4228-b5d4-f69d569070af_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f58b2de40df79566175fa8c126dc414eb349740f33ef6d7c81cbb47320c11305
|
| 3 |
+
size 91859
|
adynamicprogrammingalgorithmforspanbasednestednamedentityrecognitioninon2/0495ac5b-b465-4228-b5d4-f69d569070af_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cea8bcf732709cd40dd21abfac8cfe0f3e7a92825ed97431b8a79a68c7bd3ad6
|
| 3 |
+
size 118209
|