Add Batch f2526411-6cd6-42f3-a81a-e71858ef00aa
- abertbaseddistractorgenerationschemewithmultitaskingandnegativeanswertrainingstrategies/200b215b-a02d-42ad-b7b9-f094eb49facd_content_list.json +3 -0
- abertbaseddistractorgenerationschemewithmultitaskingandnegativeanswertrainingstrategies/200b215b-a02d-42ad-b7b9-f094eb49facd_model.json +3 -0
- abertbaseddistractorgenerationschemewithmultitaskingandnegativeanswertrainingstrategies/200b215b-a02d-42ad-b7b9-f094eb49facd_origin.pdf +3 -0
- abertbaseddistractorgenerationschemewithmultitaskingandnegativeanswertrainingstrategies/full.md +420 -0
- abertbaseddistractorgenerationschemewithmultitaskingandnegativeanswertrainingstrategies/images.zip +3 -0
- abertbaseddistractorgenerationschemewithmultitaskingandnegativeanswertrainingstrategies/layout.json +3 -0
- abstractivemultidocumentsummarizationviajointlearningwithsingledocumentsummarization/a0c61489-b7ee-4e8a-a1ea-343af81a791f_content_list.json +3 -0
- abstractivemultidocumentsummarizationviajointlearningwithsingledocumentsummarization/a0c61489-b7ee-4e8a-a1ea-343af81a791f_model.json +3 -0
- abstractivemultidocumentsummarizationviajointlearningwithsingledocumentsummarization/a0c61489-b7ee-4e8a-a1ea-343af81a791f_origin.pdf +3 -0
- abstractivemultidocumentsummarizationviajointlearningwithsingledocumentsummarization/full.md +340 -0
- abstractivemultidocumentsummarizationviajointlearningwithsingledocumentsummarization/images.zip +3 -0
- abstractivemultidocumentsummarizationviajointlearningwithsingledocumentsummarization/layout.json +3 -0
- accuratepolyglotsemanticparsingwithdaggrammars/a362056e-eba4-4767-a0a5-8a53a63a1b9c_content_list.json +3 -0
- accuratepolyglotsemanticparsingwithdaggrammars/a362056e-eba4-4767-a0a5-8a53a63a1b9c_model.json +3 -0
- accuratepolyglotsemanticparsingwithdaggrammars/a362056e-eba4-4767-a0a5-8a53a63a1b9c_origin.pdf +3 -0
- accuratepolyglotsemanticparsingwithdaggrammars/full.md +344 -0
- accuratepolyglotsemanticparsingwithdaggrammars/images.zip +3 -0
- accuratepolyglotsemanticparsingwithdaggrammars/layout.json +3 -0
- acompareaggregatetransformerforunderstandingdocumentgroundeddialogue/5cc63071-3d79-4af8-bd73-f6002698aecf_content_list.json +3 -0
- acompareaggregatetransformerforunderstandingdocumentgroundeddialogue/5cc63071-3d79-4af8-bd73-f6002698aecf_model.json +3 -0
- acompareaggregatetransformerforunderstandingdocumentgroundeddialogue/5cc63071-3d79-4af8-bd73-f6002698aecf_origin.pdf +3 -0
- acompareaggregatetransformerforunderstandingdocumentgroundeddialogue/full.md +308 -0
- acompareaggregatetransformerforunderstandingdocumentgroundeddialogue/images.zip +3 -0
- acompareaggregatetransformerforunderstandingdocumentgroundeddialogue/layout.json +3 -0
- aconcisemodelformulticriteriachinesewordsegmentationwithtransformerencoder/6484072a-ed0b-4aaa-ae63-8c2e14ddc8fc_content_list.json +3 -0
- aconcisemodelformulticriteriachinesewordsegmentationwithtransformerencoder/6484072a-ed0b-4aaa-ae63-8c2e14ddc8fc_model.json +3 -0
- aconcisemodelformulticriteriachinesewordsegmentationwithtransformerencoder/6484072a-ed0b-4aaa-ae63-8c2e14ddc8fc_origin.pdf +3 -0
- aconcisemodelformulticriteriachinesewordsegmentationwithtransformerencoder/full.md +397 -0
- aconcisemodelformulticriteriachinesewordsegmentationwithtransformerencoder/images.zip +3 -0
- aconcisemodelformulticriteriachinesewordsegmentationwithtransformerencoder/layout.json +3 -0
- activelearningapproachestoenhancingneuralmachinetranslation/80b307df-133a-4edf-b209-48832d1b757e_content_list.json +3 -0
- activelearningapproachestoenhancingneuralmachinetranslation/80b307df-133a-4edf-b209-48832d1b757e_model.json +3 -0
- activelearningapproachestoenhancingneuralmachinetranslation/80b307df-133a-4edf-b209-48832d1b757e_origin.pdf +3 -0
- activelearningapproachestoenhancingneuralmachinetranslation/full.md +377 -0
- activelearningapproachestoenhancingneuralmachinetranslation/images.zip +3 -0
- activelearningapproachestoenhancingneuralmachinetranslation/layout.json +3 -0
- activesentencelearningbyadversarialuncertaintysamplingindiscretespace/2b4fe128-e55a-47fa-b1d6-7f9a308c4815_content_list.json +3 -0
- activesentencelearningbyadversarialuncertaintysamplingindiscretespace/2b4fe128-e55a-47fa-b1d6-7f9a308c4815_model.json +3 -0
- activesentencelearningbyadversarialuncertaintysamplingindiscretespace/2b4fe128-e55a-47fa-b1d6-7f9a308c4815_origin.pdf +3 -0
- activesentencelearningbyadversarialuncertaintysamplingindiscretespace/full.md +274 -0
- activesentencelearningbyadversarialuncertaintysamplingindiscretespace/images.zip +3 -0
- activesentencelearningbyadversarialuncertaintysamplingindiscretespace/layout.json +3 -0
- activetestinganunbiasedevaluationmethodfordistantlysupervisedrelationextraction/7d6b49a6-e5eb-4a5c-9201-b68696e59c6c_content_list.json +3 -0
- activetestinganunbiasedevaluationmethodfordistantlysupervisedrelationextraction/7d6b49a6-e5eb-4a5c-9201-b68696e59c6c_model.json +3 -0
- activetestinganunbiasedevaluationmethodfordistantlysupervisedrelationextraction/7d6b49a6-e5eb-4a5c-9201-b68696e59c6c_origin.pdf +3 -0
- activetestinganunbiasedevaluationmethodfordistantlysupervisedrelationextraction/full.md +281 -0
- activetestinganunbiasedevaluationmethodfordistantlysupervisedrelationextraction/images.zip +3 -0
- activetestinganunbiasedevaluationmethodfordistantlysupervisedrelationextraction/layout.json +3 -0
- actordoublecriticincorporatingmodelbasedcriticfortaskorienteddialoguesystems/ea6019ac-bb6b-406e-aca5-4d658eddd193_content_list.json +3 -0
- actordoublecriticincorporatingmodelbasedcriticfortaskorienteddialoguesystems/ea6019ac-bb6b-406e-aca5-4d658eddd193_model.json +3 -0
abertbaseddistractorgenerationschemewithmultitaskingandnegativeanswertrainingstrategies/200b215b-a02d-42ad-b7b9-f094eb49facd_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1131b752f73ca4c843460ffe6309290e1d6f702d77878be610f443594feffa36
size 86985

abertbaseddistractorgenerationschemewithmultitaskingandnegativeanswertrainingstrategies/200b215b-a02d-42ad-b7b9-f094eb49facd_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:af5916f3684aa5a664fb97b0e2087a2ecc565822c9566739183c56adbbfc8128
size 100252

abertbaseddistractorgenerationschemewithmultitaskingandnegativeanswertrainingstrategies/200b215b-a02d-42ad-b7b9-f094eb49facd_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6da46688ad6aa068062ebbc003fcd042331d00374164094ec61aa6f7481bfb89
size 403100
abertbaseddistractorgenerationschemewithmultitaskingandnegativeanswertrainingstrategies/full.md
ADDED
@@ -0,0 +1,420 @@
# A BERT-based Distractor Generation Scheme with Multi-tasking and Negative Answer Training Strategies

Ho-Lam Chung$^{1}$, Ying-Hong Chan$^{2}$, Yao-Chung Fan$^{3}$

Department of Computer Science and Engineering
National Chung Hsing University,
Taichung, Taiwan

$^{1}$holam.chung@protonmail.com
$^{2}$harry831120@gmail.com
$^{3}$yfan@nchu.edu.tw

# Abstract

In this paper, we investigate two limitations of the existing distractor generation (DG) methods. First, the quality of the existing DG methods is still far from practical use; there is still room for DG quality improvement. Second, the existing DG designs mainly target single distractor generation, whereas practical MCQ preparation requires multiple distractors. Aiming at these goals, we present a new distractor generation scheme with multi-tasking and negative answer training strategies for effectively generating multiple distractors. The experimental results show that (1) our model advances the state-of-the-art result from 28.65 to 39.81 (BLEU 1 score) and (2) the generated distractors are diverse and show strong distracting power for multiple-choice questions.
# 1 Introduction

Given a passage, a question, and an answer phrase, the goal of distractor generation (DG) is to generate context-related wrong options (i.e., distractors) for multiple-choice questions (MCQ). Pioneering research (Gao et al., 2019; Yeung et al., 2019; Zhou et al., 2019) has demonstrated the feasibility of generating distractors based on deep learning techniques.

While significant advances for DG have been reported in the literature, we find that the existing DG results are still far from practical use. In this paper, we investigate the following two issues for distractor generation: (1) DG quality improvement and (2) multiple distractor generation.

**DG Quality Improvement** There is still room for improvement toward high-quality distractor generation. By manually examining the results produced by the existing methods, we find that they are still far from ideal for practical use. Thus, one goal of our research is to improve DG quality further.

**Example 1**

Context: Omitted. (See Appendix)

Question:
- Why did Mr. King want to send Henry away?

Answer:
- Because Henry was too lazy.

Gen. Distractors:
- $d_{1}$: Because Henry didn't want to go.
- $d_{2}$: Because Henry didn't want to go to the bookstore.

**Example 2**

Context: Omitted. (See Appendix)

Question:
- Which of the following women would look most attractive?

Answer:
- A short red-haired woman who wears a purple hat.

Gen. Distractors:
- $d_{1}$: A young woman who wears a white hat.
- $d_{2}$: A woman who wears a white hat.

Table 1: Two examples showing the issue of generating multiple distractors by a simple beam search. Note that the generated distractors (i.e., $d_{1}$ and $d_{2}$) are the same statements with only slight differences in word usage. Such results lower the distracting power for MCQ preparation.

For the quality issue, we explore the employment of the BERT model for performance improvement. Employing transformer-based language models is known to be useful for improving NLP tasks. Thus, we investigate the application of the BERT model to DG and report our design in this paper.

**Multiple Distractor Generation** The existing DG methods mainly focus on single distractor generation. However, for practical MCQ preparation, multiple distractors are desired. For more than one distractor, the existing practice is to keep multiple results given by a beam search strategy. However, we find that in many cases the generated distractors all refer to the same concept. In fact, the generated distractors all come from the same latent representation, which raises the concern that they may be semantically similar. In Table 1, we show two DG examples of this problem. In the illustrated examples, one can observe that the generated distractors are the same statements with only slight differences in word usage. Such results lower the distracting power for MCQ preparation.

For this limitation, we propose to view the multiple distractor generation/selection problem as a coverage problem, rather than individually selecting the top-$k$ distractors based on prediction probability. In other words, we propose to choose a distractor set that maximizes the difficulty of the multiple-choice question, rather than individually picking results with the highest probability but with similar semantics.

The contributions of this paper are (1) a new DG model based on BERT, where the experimental evaluation on benchmark datasets shows that our model outperforms the existing best models (Zhou et al., 2019) and pushes the state-of-the-art result from 28.65 to 39.81 (BLEU 1 score); (2) an investigation of employing a multiple-choice question answering task to evaluate DG performance; and (3) an investigation of treating the multiple distractor generation problem as a coverage problem, where the experimental results demonstrate that the generated distractors are diverse and show strong distracting power for multiple-choice questions.

The rest of this paper is organized as follows. In Section 2, we introduce our model design for single distractor generation. In Section 3, we introduce our multiple distractor schemes and the incorporation of question-answering models for distractor selection. In Section 4, we report the results of the performance analysis. In Section 5, we review the literature related to this work. Finally, Section 6 concludes our study and discusses future work.
# 2 BERT Distractor Generation

# 2.1 BERT Model Review

The BERT model and its family (Liu et al., 2019; Lan et al., 2019) are composed of a stack of multi-layer bidirectional Transformer encoders. The input to a BERT model is a sequence of tokens. For a given token, its input representation is first constructed by summing the corresponding token, segment, and position embeddings. The input embeddings then travel through the pre-trained/fine-tuned BERT for task learning and prediction. In general, BERT can be employed in two levels of language modeling tasks: sequence-level classification and token-level prediction. For these tasks, there are three special tokens: [C], [S], and [M]. The embedding of the [C] token is designed to be used as the aggregate sequence representation for classification tasks. The [S] token is designed to distinguish different sentences of a token sequence (to signal information from multiple sentences, as the input token sequence can be a pack of multiple sentences). The [M] token is designed to be used in token-level prediction (e.g., predicting a masked token based on context words, or predicting the starting/ending probabilities for span-based tasks such as QA).

As reported in (Chan and Fan, 2019; Dong et al., 2019), BERT is essentially an auto-encoder language modeling design, which aims to reconstruct the original data from corrupted inputs. If BERT is asked to predict a sequence of consecutive masked tokens, it often produces incoherent and rambling results. For example, when using BERT to predict three consecutive [M] [M] [M] masked tokens, the same prediction for all three tokens is often observed. This is because the context (the information for predicting the tokens) for the masked tokens is nearly identical except for the position embedding, making the generated sentences incoherent. Thus, we take the previously decoded results into consideration when decoding the next distractor token, as introduced in the next subsection.
# 2.2 BERT-based Distractor Generation (BDG)

In a distractor generation scenario, there are three given inputs: (1) a paragraph $P$, (2) an answer $A$, and (3) a question $Q$. For ease of discussion, let $C$ (referred to as a context sequence) denote the sequence of tokens given by concatenating $P$, $Q$, and $A$.

Our BDG model generates distractor tokens in an auto-regressive manner. Specifically, the BDG model predicts one token at a time based on (1) the given context sequence $C$ and (2) the previously predicted distractor tokens, and it takes multiple iterations to generate a distractor. In Table 2, we show a running example of the BDG model. Note that at each iteration, our model predicts a token based on $C$ and the previously generated tokens. For example, at Iteration 1, we generate "Because" based on $C$. At Iteration 2, we generate "Henry" based on $C$ and "Because", and at Iteration 3, we generate "didn't" based on $C$, "Because", and "Henry". The generation terminates when [S] is predicted. In this example, "Because Henry didn't want to go." is the final generated result.

<table><tr><td>Iter.</td><td>Input Sequence</td><td>Predict</td></tr><tr><td>1</td><td>[C] C [S] [M]</td><td>Because</td></tr><tr><td>2</td><td>[C] C [S] Because [M]</td><td>Henry</td></tr><tr><td>3</td><td>[C] C [S] Because Henry [M]</td><td>didn't</td></tr><tr><td>4</td><td>[C] C [S] Because Henry didn't [M]</td><td>want</td></tr><tr><td>5</td><td>[C] C [S] Because Henry didn't want [M]</td><td>to</td></tr><tr><td>6</td><td>[C] C [S] Because Henry didn't want to [M]</td><td>go</td></tr><tr><td>7</td><td>[C] C [S] Because Henry didn't want to go [M]</td><td>.</td></tr><tr><td>8</td><td>[C] C [S] Because Henry didn't want to go. [M]</td><td>[S]</td></tr></table>

Table 2: A Running Example of the BDG Scheme

Specifically, the input sequence $X_{i}$ at Iteration $i$ to BERT is

$$
X_{i} = \left([\mathrm{C}],\, C,\, [\mathrm{S}],\, \hat{d}_{1}, \dots, \hat{d}_{i},\, [\mathrm{M}]\right)
$$

Let $\mathbf{h}_{[\mathrm{M}]}\in \mathbb{R}^h$ denote the hidden representation of [M] in $X_{i}$ returned by the BERT transformer stack. The prediction of $\hat{d}_{i+1}$ is given by a linear transformation $\mathbf{W}_{\mathrm{DG}}\in \mathbb{R}^{h\times |V|}$ followed by a softmax activation over the vocabulary dimension:

$$
\begin{aligned}
p(w \mid X_{i}) &= \operatorname{softmax}\left(\mathbf{h}_{[\mathrm{M}]} \cdot \mathbf{W}_{\mathrm{DG}} + \mathbf{b}_{\mathrm{DG}}\right) \\
\hat{d}_{i+1} &= \operatorname{argmax}_{w}\, p(w \mid X_{i})
\end{aligned}
$$

Subsequently, the newly generated token $\hat{d}_{i+1}$ is appended to form $X_{i+1}$, and the distractor generation process is repeated on the new $X_{i+1}$ until [S] is predicted. Our loss function is as follows:

$$
\underset{\theta}{\text{minimize}}\; -\sum_{\forall (C, D)} \sum_{i=0}^{|D|} \log_{2} p\left(d_{i+1} \mid C, d_{1:i}; \theta\right)
$$
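To make the sequential decoding concrete, below is a minimal sketch of the loop in Python using the huggingface transformers API (which the paper's implementation builds on). The checkpoint name, function name, and greedy (beam-size-1) decoding are illustrative assumptions; the plain pre-trained weights shown here would need to be replaced by BDG fine-tuned weights (e.g., from the authors' repository) to produce meaningful distractors.

```python
import torch
from transformers import BertTokenizer, BertForMaskedLM

# Assumption: a BERT masked-LM checkpoint; the authors fine-tune bert-base-cased.
tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
model = BertForMaskedLM.from_pretrained("bert-base-cased")
model.eval()

def generate_distractor(context: str, max_len: int = 32) -> str:
    """Greedy sequential decoding: [CLS] C [SEP] d_1 ... d_i [MASK]."""
    decoded = []
    for _ in range(max_len):
        # Rebuild the input with the tokens decoded so far plus a trailing [MASK].
        partial = tokenizer.convert_tokens_to_string(decoded + [tokenizer.mask_token])
        inputs = tokenizer(context, partial, return_tensors="pt", truncation=True)
        with torch.no_grad():
            logits = model(**inputs).logits
        # Predict the token at the [MASK] position (argmax over the vocabulary).
        mask_pos = (inputs["input_ids"][0] == tokenizer.mask_token_id).nonzero()[0].item()
        next_token = tokenizer.convert_ids_to_tokens(int(logits[0, mask_pos].argmax()))
        if next_token == tokenizer.sep_token:  # [S] terminates the distractor
            break
        decoded.append(next_token)
    return tokenizer.convert_tokens_to_string(decoded)
```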
# 2.3 Multi-task with Parallel MLM

From the experimental results (presented in Section 4), we see that the BDG model advances the state-of-the-art result (Zhou et al., 2019) from 28.65 to 35.30 (BLEU 1 score). While the token-level evaluation looks promising, we find that the generation results still have room for improvement.

For performance improvement, we first propose to jointly train BDG with a parallel MLM (P-MLM) architecture for distractor generation to enhance the quality of BDG. The P-MLM scheme for generating distractors is structured as follows.

For a given context $C$, the input sequence $X$ to the P-MLM model is formulated as

$$
X = \big([\mathrm{C}],\, C,\, [\mathrm{S}],\, [\mathrm{M}]_{d_{1}},\, [\mathrm{M}]_{d_{2}},\, \dots,\, [\mathrm{M}]_{d_{|D|}}\big)
$$

Let $\mathbf{h}_{[\mathrm{M}]_{d_i}}\in \mathbb{R}^h$ denote the hidden representation of $[\mathrm{M}]_{d_i}$ in $X$ returned by the BERT transformer stack. The prediction of $\hat{d}_i$ is given by a linear transformation $\mathbf{W}_{\mathrm{P\text{-}MLM}}\in \mathbb{R}^{h\times |V|}$ followed by a softmax activation over the vocabulary dimension:

$$
\begin{aligned}
p(w \mid X) &= \operatorname{softmax}\left(\mathbf{h}_{[\mathrm{M}]_{d_i}} \cdot \mathbf{W}_{\mathrm{P\text{-}MLM}} + \mathbf{b}_{\mathrm{P\text{-}MLM}}\right) \\
\hat{d}_{i} &= \operatorname{argmax}_{w}\, p(w \mid X)
\end{aligned}
$$

The loss function for P-MLM is

$$
\underset{\theta}{\text{minimize}}\; -\sum_{\forall (C, D)} \phi_{\mathrm{P\text{-}MLM}}(C, D)
$$

$$
\phi_{\mathrm{P\text{-}MLM}}(C, D) = \sum_{\forall d_{i}} \log_{2} p(d_{i} \mid C, [\mathrm{M}]_{d_{i}}; \theta)
$$

We propose to jointly train P-MLM and BDG with the following multi-task loss function, where $\gamma$ is a hyper-parameter controlling the weighting between the two tasks (see the effect of the $\gamma$ value in Subsection 4.6):

$$
\begin{aligned}
&\underset{\theta}{\text{minimize}}\; -\sum_{\forall (C, D)} \left[\phi_{\mathrm{BDG}}(C, D) + \gamma \cdot \phi_{\mathrm{P\text{-}MLM}}(C, D)\right], \\
&\phi_{\mathrm{BDG}}(C, D) = \sum_{i=0}^{|D|} \log_{2} p\left(d_{i+1} \mid C, d_{1:i}; \theta\right)
\end{aligned}
$$
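Below is a minimal sketch of the joint objective, assuming the per-position logits from the sequential (BDG) and parallel (P-MLM) heads have already been gathered; the function and variable names are illustrative rather than the authors' code. Note that `cross_entropy` uses the natural logarithm, which differs from the paper's $\log_2$ only by a constant factor.

```python
import torch
import torch.nn.functional as F

def multitask_loss(bdg_logits, pmlm_logits, gold_ids, gamma=1.0):
    """bdg_logits / pmlm_logits: (|D|, |V|) logits at the distractor token
    positions from the sequential and parallel decoders; gold_ids: (|D|,)
    gold distractor token ids."""
    loss_bdg = F.cross_entropy(bdg_logits, gold_ids)    # -log p(d_{i+1} | C, d_{1:i})
    loss_pmlm = F.cross_entropy(pmlm_logits, gold_ids)  # -log p(d_i | C, [M]_{d_i})
    return loss_bdg + gamma * loss_pmlm                 # gamma-weighted joint loss

# Toy usage with random logits (vocab size of bert-base-cased is 28996):
loss = multitask_loss(torch.randn(8, 28996), torch.randn(8, 28996),
                      torch.randint(28996, (8,)), gamma=1.0)
```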
The multi-task design is motivated by the following observations. First, as mentioned, we target learning distractor generation from real reading comprehension examinations (RACE-like MCQ), and we find that many questions in the RACE dataset are summary-oriented; many questions ask "what is the best title for this passage?" or "what is this passage about?" Such questions require the model to have the capability of passage-level semantic summarization. While the original BDG scheme successfully generates fluent sentences, we find that it may over-fit on sentence writing and under-fit on learning passage-level semantic understanding. Note that the sequential-MLM design (BDG) is essentially a one-by-one masked token prediction architecture; such a method may over-focus on guessing a single token and ignore the overall semantics. Thus, we incorporate the multi-task learning setting to prevent this potential over-fitting problem. From the experiments, we find that the multi-task learning setting indeed improves the quality of distractor generation.

<table><tr><td></td><td>P.M.</td><td>Gold</td></tr><tr><td># of cases on BLEU 1</td><td>57</td><td>12</td></tr><tr><td># of cases on BLEU 2</td><td>55</td><td>4</td></tr><tr><td># of cases on BLEU 3</td><td>48</td><td>0</td></tr><tr><td># of cases on BLEU 4</td><td>35</td><td>0</td></tr><tr><td># of cases on ROUGE-L</td><td>55</td><td>1</td></tr></table>

Table 3: Answer Copying Problem on P.M.
# 2.4 Answer Negative Regularization

In addition to the multi-task design, examining the DG results leads to another observation: in many cases there is an answer copying problem, i.e., the generated distractors are similar to the given answers. To better see this phenomenon, we count such cases. In Table 3, we show the number of cases in which the generated distractor $\hat{D}$ has a token-level similarity score greater than 0.95 with respect to the answer $A$, together with the counts for the gold distractors (the human-invented distractors from the RACE dataset). As the comparison in Table 3 shows, there is a significant gap between the human-invented distractors and the model-generated ones.

Motivated by the answer copying problem, we propose to incorporate a loss (referred to as the answer negative loss) that discourages predicting tokens of $A$ when predicting $\hat{d}_i$. With the answer negative loss, our loss function for BDG is as follows:

$$
\underset{\theta}{\text{minimize}}\; -\sum_{\forall (C, D)} \left(\phi_{\mathrm{AN}}(C, D) + \gamma \cdot \phi_{\mathrm{P\text{-}MLM}}(C, D)\right),
$$

$$
\phi_{\mathrm{AN}}(C, D) = \sum_{i=0}^{|D|} \Big(\log_{2} p(d_{i+1} \mid C, d_{1:i}; \theta) + \sum_{\forall a_{j} \in A} \log_{2}\big(1 - p(a_{j} \mid C, [\mathrm{M}]_{a_{j}}; \theta)\big)\Big) \tag{1}
$$

The answer negative loss is motivated by the expectation that the generated distractor $\hat{D}$ should use words different from those in $A$.

Figure 1: The Multi-tasking Architecture (figure omitted)

The overall architecture for training our BDG model is shown in Figure 1. The core structure of our distractor generation is the sequential recurrent MLM decoding mechanism; that is, during the testing stage we use the results from the sequential recurrent MLM decoding part. During the training stage, however, we also incorporate the parallel MLM decoding mechanism, jointly considering the answer negative regularization and the sentence-level distractor loss, as shown in the right part of the architecture in Figure 1.
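A minimal sketch of the answer negative term in Eq. (1) follows; the logits at the distractor positions and at the answer mask positions $[\mathrm{M}]_{a_j}$ are assumed to be pre-gathered, and the names and shapes are illustrative rather than the authors' code.

```python
import torch

def answer_negative_loss(gen_logits, gold_ids, ans_logits, answer_ids):
    """gen_logits: (T, |V|) logits at the distractor [M] positions with
    gold_ids: (T,) gold distractor tokens; ans_logits: (A, |V|) logits at
    the [M]_{a_j} positions with answer_ids: (A,) answer token ids."""
    gen_lp = torch.log_softmax(gen_logits, dim=-1)
    # Generation term of Eq. (1): sum_i log p(d_{i+1} | C, d_{1:i})
    gen_term = gen_lp.gather(1, gold_ids.unsqueeze(1)).sum()
    # Negative term of Eq. (1): sum_j log(1 - p(a_j | C, [M]_{a_j})),
    # pushing probability mass away from the answer tokens.
    ans_p = torch.softmax(ans_logits, dim=-1).gather(1, answer_ids.unsqueeze(1))
    neg_term = torch.log1p(-ans_p.clamp(max=1.0 - 1e-6)).sum()  # clamp avoids log(0)
    return -(gen_term + neg_term)  # minimized during training
```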
# 3 Multiple Distractor Generation

# 3.1 Selecting Distractors by Entropy Maximization

As mentioned, another point that can be improved for DG is that the existing methods mainly focus on single distractor generation. For more than one distractor, the existing practice is to select the results on different beam search paths as multiple options, which lowers the power of distracting a reader for MCQ preparation.

Our viewpoint is to select a distractor set (by considering semantic diversity) rather than individually selecting top-$k$ distractors based on prediction probability.

Based on this view, we propose to incorporate a multi-choice reading comprehension (MRC) model for ranking/selecting distractor sets. First, let $\mathbb{M}_{\mathrm{MRC}}$ be an MRC model. Note that $\mathbb{M}_{\mathrm{MRC}}$ takes a passage $P$, a question $Q$, and a set of options (including an answer $A$ and distractors $D_{1}, D_{2}, \ldots, D_{n}$) as input and outputs $[p_{A}, p_{D_{1}}, \ldots, p_{D_{n}}]$ as the answer probabilities of the options. $\mathbb{M}_{\mathrm{MRC}}$ is trained by maximizing the answer probability $p_{A}$ while minimizing the probabilities $[p_{D_{1}}, \ldots, p_{D_{n}}]$.

With $\mathbb{M}_{\mathrm{MRC}}$, our idea is as follows. Let $\hat{D} = \{\hat{d}_1,\hat{d}_2,\dots,\hat{d}_n\}$ be the set of distractors generated by a BDG model. In a common MCQ setting, there are four options (one answer $A$ and three distractors $d_{i}, d_{j}, d_{k}$) for each question. Our idea is to enumerate all possible triples from $\{\hat{d}_1,\hat{d}_2,\dots,\hat{d}_n\}$. That is, we have the triple set

$$
\{(d_{i}, d_{j}, d_{k}) \mid i, j, k \text{ distinct},\; d_{i}, d_{j}, d_{k} \in \hat{D}\}
$$

For a given passage $P$, question $Q$, and answer $A$, our goal is to find a triple $(d_i, d_j, d_k)$ forming an option set $O$ (i.e., $\{d_i, d_j, d_k, A\}$) that maximizes the following entropy function:

$$
\text{maximize}\; -\sum_{\forall o_{i} \in O} p_{o_{i}} \log_{2} p_{o_{i}} \tag{2}
$$
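A minimal sketch of this selection procedure: enumerate all candidate triples, score each option set with an MRC model, and keep the highest-entropy set. Here `mrc_option_probs` is a stand-in for any multiple-choice RC model that returns normalized probabilities over the options, and `candidates` would typically be the beam-search outputs of one or more BDG variants.

```python
import itertools
import math

def select_distractors(passage, question, answer, candidates, mrc_option_probs):
    """Return the distractor triple maximizing the option-entropy of Eq. (2)."""
    best_triple, best_entropy = None, -1.0
    for triple in itertools.combinations(candidates, 3):
        options = [answer, *triple]
        # Probabilities [p_A, p_{d_i}, p_{d_j}, p_{d_k}] from the MRC model.
        probs = mrc_option_probs(passage, question, options)
        entropy = -sum(p * math.log2(p) for p in probs if p > 0)
        if entropy > best_entropy:
            best_triple, best_entropy = triple, entropy
    return best_triple
```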
# 3.2 BDG-EM

The idea of selecting distractors by entropy maximization can be further generalized by employing multiple DG models. To obtain multiple DG models, we leverage the variants of the BDG model (i.e., models with/without answer negative regularization and with/without P-MLM multi-task training). Let $\hat{D}$, $\hat{D}_{\mathrm{PM}}$, and $\hat{D}_{\mathrm{PM+AN}}$ be the distractor sets generated by the plain BDG model (without answer negative regularization or P-MLM multi-task training), the BDG model with P-MLM multi-task training only, and the full BDG model, respectively. That is, we have the following triple set:

$$
\left\{(d_{i}, d_{j}, d_{k}) \mid d_{i} \in \hat{D},\; d_{j} \in \hat{D}_{\mathrm{PM}},\; d_{k} \in \hat{D}_{\mathrm{PM+AN}}\right\}
$$

Among these triples, the one that maximizes Eq. (2) is selected as the final set of distractors.
# 4 Performance Evaluation

# 4.1 Experimental Settings

**Datasets** We follow the setting of (Gao et al., 2019) and evaluate our framework on the RACE dataset (Lai et al., 2017). RACE contains 27,933 articles with 97,687 questions from English examinations for Chinese students in grades 7 to 12. We use the data split setting from (Gao et al., 2019). Table 4 reports the statistics of the dataset. All sentences are tokenized by the WordPiece tokenizer (Wu et al., 2016).

**Implementation Details** Our models are implemented with the huggingface transformers framework (Wolf et al., 2019). All experiments are based on the bert-base-cased model. For optimization during training, we use AdamW as the optimizer with an initial learning rate of 5e-5 for all baselines and our model. The maximum number of epochs is set to 6, with a batch size of 30, on two RTX Titan GPUs. We make our code and models available at https://github.com/voidful/BDG.
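A minimal sketch of the training setup described above (bert-base-cased, AdamW, learning rate 5e-5, up to 6 epochs, batch size 30); the dummy tensors merely make the loop self-contained and are not the RACE-derived training sequences the paper actually uses.

```python
import torch
from torch.utils.data import DataLoader, TensorDataset
from transformers import BertForMaskedLM

model = BertForMaskedLM.from_pretrained("bert-base-cased")
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)  # lr as in the paper

# Placeholder data: real training feeds [C] C [S] d_1 ... [M] sequences built
# from RACE passages, questions, answers, and gold distractors.
dummy = TensorDataset(torch.randint(1000, (30, 128)), torch.randint(1000, (30, 128)))
loader = DataLoader(dummy, batch_size=30)  # batch size 30 in the paper's setup

for epoch in range(6):  # the paper trains for at most 6 epochs
    for input_ids, labels in loader:
        loss = model(input_ids=input_ids, labels=labels).loss
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
```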
# 4.2 Compared Methods

In the experiments, we mainly compare the following distractor generation methods.

- **CO-Att.** The state-of-the-art method reported in (Zhou et al., 2019). The model is based on an LSTM augmented by a co-attention mechanism.
- **DS-Att.** The method based on an LSTM augmented by the dynamic and static attention design reported in (Gao et al., 2019). This method serves as a baseline for distractor generation based on seq2seq RNN architectures.
- **GPT** A model based on GPT (Radford et al., 2018) trained for distractor generation. This scheme serves as a baseline based on a transformer-based pre-trained model.
- **BDG** Our scheme without the answer negative technique and the parallel masked-LM multi-task training.
- **BDG$_{\mathrm{PM}}$** The BDG scheme with the parallel masked-LM multi-task training ($\gamma = 1$).
- **BDG$_{\mathrm{AN+PM}}$** The BDG scheme with both techniques ($\gamma = 1$).

<table><tr><td>Train samples</td><td>96501</td></tr><tr><td>Test samples</td><td>12284</td></tr><tr><td>Avg. article length</td><td>335.6</td></tr><tr><td>Avg. distractor length</td><td>8.6</td></tr><tr><td>Avg. question length</td><td>10.0</td></tr><tr><td>Avg. answer length</td><td>8.3</td></tr><tr><td>Avg. distractor number</td><td>2.1</td></tr></table>

Table 4: Training Data Statistics
# 4.3 Token Score Comparison

We employ the BLEU (Papineni et al., 2002) and ROUGE-L (Lin, 2004) scores to evaluate the performance of the compared methods. The BLEU scores evaluate average n-gram precision against a set of reference sentences, with a penalty for overly short sentences. ROUGE-L measures the recall of the longest common subsequences.

The comparison results are summarized in Table 5. There are three observations to note. First, our models significantly outperform the existing methods (i.e., DS-Att. and CO-Att.); our best performing model advances the state-of-the-art result from 28.65 to 39.81 (BLEU 1 score). Second, the methods based on transformer models outperform the RNN-based models, which again demonstrates the effectiveness of employing pre-trained transformer models for downstream tasks. Third, our BERT-based models outperform the GPT-based model. We believe the reason is that the distractors in the RACE dataset are mostly summary-type sentences that require semantic understanding; the GPT-based model may over-focus on sentence writing, fail to capture the whole context needed to generate summary-type sentences, and therefore obtain lower scores.

We also provide experimental results on the effectiveness of reducing the answer copying problem discussed in Subsection 2.4. In Table 6, we show the number of cases in which the generated distractor $\hat{D}$ has a token-level similarity score greater than 0.95 with respect to the answer $A$. From the results, we see a significant improvement made by the BDG schemes.
# 4.4 MCQ Model Accuracy Comparison

In this set of experiments, we evaluate DG quality through the RACE reading comprehension task (Lai et al., 2017). Our idea is that a poorly generated DG result will reduce the difficulty of an MCQ task. Thus, we incorporate an MCQ answering model (also trained on the RACE dataset) and evaluate its accuracy on multiple-choice questions whose distractors are generated by the compared models. Specifically, given $C$, $Q$, and $A$, we generate three distractors $D_{1}$, $D_{2}$, and $D_{3}$, and then submit the multiple-choice question to the RACE model. Randomly generated results yield the easiest task to solve, while the best generated results challenge the MCQ model. Therefore, we use the accuracy of the model as a metric: the higher the accuracy, the worse the generation quality.

The training details of the RACE model are as follows. We use PyTorch Transformers (Wolf et al., 2019) and the roberta-base-openai-detector model fine-tuned by OpenAI (Solaiman et al., 2019), with a maximum of 512 tokens, to implement the model. AdamW with a learning rate of 1e-5 is used for fine-tuning. The model is trained for 10 epochs on 2 GPUs (V100) with gradient accumulation every two steps, which makes the batch size approximately 18. Model checkpoints are saved and evaluated on the validation set every 5,000 steps, and we select the top checkpoint based on the evaluation loss on the validation set. The RACE dataset includes a middle and a high subset; the total numbers of passages and questions are 27,933 and 97,687, respectively. The middle subset averages about 250 words per passage, while the high subset averages 350 words per passage.

In this set of experiments, we compare BDG, BDG$_{\mathrm{PM}}$, BDG$_{\mathrm{AN+PM}}$, and the BDG model with entropy maximization (called BDG$_{\mathrm{EM}}$, introduced in Subsection 3.2) with the beam search size set to 3. In addition, we also experiment with GPT, a scheme that takes randomly selected distractors from the data as the DG result, and a scheme that uses the gold distractors. The results of the compared methods are summarized in Table 7.

We have the following findings about the results shown in Table 7. First, as expected, the method with randomly selected distractors gives the MCQA model the highest accuracy, as randomly selected distractors obviously lower the difficulty of the MCQ task. Second, all our models outperform the MCQ with the gold distractors, showing the effectiveness of the proposed models. Third, as expected, our BDG$_{\mathrm{EM}}$ provides the best performing result on this metric.
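A minimal sketch of this evaluation protocol follows; `generate_distractors` and `mcq_predict` are stand-ins for the DG model and the RACE-trained MCQ answering model, and the shuffling step is an illustrative detail to avoid positional cues.

```python
import random

def mcq_accuracy(examples, generate_distractors, mcq_predict):
    """examples: iterable of (passage, question, answer) triples.
    Returns MCQ-model accuracy: lower accuracy = stronger distractors."""
    correct = total = 0
    for passage, question, answer in examples:
        # Build a 4-option MCQ: the answer plus three generated distractors.
        options = [answer] + list(generate_distractors(passage, question, answer))[:3]
        random.shuffle(options)          # no positional cue for the MCQ model
        gold = options.index(answer)
        pred = mcq_predict(passage, question, options)  # predicted option index
        correct += int(pred == gold)
        total += 1
    return correct / total
```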
<table><tr><td></td><td>BLEU 1</td><td>BLEU 2</td><td>BLEU 3</td><td>BLEU 4</td><td>ROUGE-L</td></tr><tr><td>BDG<sub>AN+PM</sub></td><td>39.52</td><td>24.29</td><td>17.28</td><td>13.28</td><td>33.40</td></tr><tr><td>BDG<sub>PM</sub></td><td>39.81</td><td>24.81</td><td>17.66</td><td>13.56</td><td>34.01</td></tr><tr><td>BDG</td><td>35.30</td><td>20.65</td><td>13.66</td><td>9.53</td><td>31.11</td></tr><tr><td>GPT</td><td>36.49</td><td>20.75</td><td>13.31</td><td>9.31</td><td>31.59</td></tr><tr><td>DS-Att.</td><td>27.32</td><td>14.69</td><td>9.29</td><td>6.47</td><td>15.12</td></tr><tr><td>CO-Att.</td><td>28.65</td><td>15.15</td><td>9.77</td><td>7.01</td><td>15.39</td></tr></table>

Table 5: Performance Comparison on Token Scores

<table><tr><td></td><td>BDG<sub>AN+PM</sub></td><td>BDG<sub>PM</sub></td><td>BDG</td><td>GPT</td><td>Gold</td><td>Random</td></tr><tr><td>BLEU 1</td><td>43</td><td>57</td><td>115</td><td>124</td><td>12</td><td>0</td></tr><tr><td>BLEU 2</td><td>40</td><td>55</td><td>115</td><td>121</td><td>4</td><td>0</td></tr><tr><td>BLEU 3</td><td>37</td><td>48</td><td>109</td><td>109</td><td>0</td><td>0</td></tr><tr><td>BLEU 4</td><td>30</td><td>35</td><td>97</td><td>88</td><td>0</td><td>0</td></tr><tr><td>ROUGE-L</td><td>42</td><td>55</td><td>122</td><td>123</td><td>1</td><td>0</td></tr></table>

Table 6: The Effect on Mitigating the Answer Copying Problem

<table><tr><td></td><td>Accuracy</td></tr><tr><td>Randomly Selected Distractors</td><td>88.10%</td></tr><tr><td>Gold Distractors</td><td>78.00%</td></tr><tr><td>GPT</td><td>78.07%</td></tr><tr><td>BDG</td><td>73.96%</td></tr><tr><td>BDG<sub>PM</sub></td><td>74.34%</td></tr><tr><td>BDG<sub>AN+PM</sub></td><td>74.05%</td></tr><tr><td>BDG<sub>EM</sub></td><td>69.44%</td></tr></table>

Table 7: Comparison by MCQ Accuracy
# 4.5 Qualitative Examination by Case Study

In this subsection, we present showcases of the improvement in the multiple distractor generation scenario, using the same examples introduced in Section 1 for comparison. As mentioned, the naive employment of a beam search strategy produces similar DG results: the distractors generated by BDG are about the same concept. However, as shown in Table 8, BDG$_{\mathrm{EM}}$ produces distractors that are more diverse with respect to each other. The results demonstrate the effectiveness of our BDG$_{\mathrm{EM}}$ scheme for generating multiple distractors for MCQ preparation.

# 4.6 Parameter Study on $\gamma$

In this subsection, we examine the effect of varying the parameter $\gamma$. The results are shown in Table 9. For BDG trained with parallel-MLM only, the best setting for $\gamma$ is 6; for BDG trained with both answer negative regularization and parallel-MLM, the best setting for $\gamma$ is 7.
# 5 Related Work

DG research can be categorized from different perspectives. First, regarding task type, there are two main categories: cloze-style distractor generation and reading comprehension (RC) distractor generation. The cloze-style DG task is viewed as a word-filling problem. In general, the first step is to extract distractor candidates from the context or some knowledge base, and the next step is to rank the extracted candidates as the final result. Along this direction, the models are mainly based on similarity heuristics (Sumita et al., 2005; Mitkov et al., 2006; Guo et al., 2016; Ren and Zhu, 2020) or supervised machine learning (Liang et al., 2018; Yeung et al., 2019). The distractors generated for cloze-style DG are mainly at the word/phrase level. On the other hand, RC-type DG focuses on generating sentence-level distractors for reading-comprehension-level testing, such as summarizing an article or understanding the author's opinion (Gao et al., 2019; Zhou et al., 2019). For sentence-level distractors, neural models are commonly employed, as it is difficult to generate a semantically rich and fluent distractor from the question, content, and answer. In this paper, we also focus on generative sentence-level DG for the RC task. However, as mentioned in the introduction, we find the existing DG results are still far from human level; the best SOTA result (in terms of BLEU 1 score) is about 29, which is far from ideal for practical use. Aiming at this point, we explore the employment of transformer-based pre-trained models for performance improvement. For clarity of comparison, we summarize the existing studies on distractor generation in Table 10.

<table><tr><td rowspan="2"></td><td colspan="2">Distractor Level</td><td colspan="2">Answer Type</td><td colspan="2">Method Type</td><td>Model</td></tr><tr><td>Word/phrase</td><td>Sentence</td><td>Cloze</td><td>R.C.</td><td>Extractive</td><td>Generative</td><td>Type</td></tr><tr><td>Gao et al. 2019</td><td>Y</td><td>Y</td><td></td><td>Y</td><td></td><td>Y</td><td>RNN</td></tr><tr><td>Zhou et al. 2019</td><td>Y</td><td>Y</td><td></td><td>Y</td><td></td><td>Y</td><td>RNN</td></tr><tr><td>Araki et al. 2016</td><td>Y</td><td></td><td>Y</td><td></td><td>Y</td><td></td><td>Non-neural model</td></tr><tr><td>Welbl et al. 2017</td><td>Y</td><td></td><td></td><td>Y</td><td>Y</td><td></td><td>Random forests</td></tr><tr><td>Guo et al. 2016</td><td>Y</td><td></td><td>Y</td><td></td><td>Y</td><td></td><td>Word2Vec</td></tr><tr><td>Kumar et al. 2015</td><td>Y</td><td>Y</td><td>Y</td><td></td><td>Y</td><td></td><td>SVM</td></tr><tr><td>Liang et al. 2017</td><td>Y</td><td></td><td>Y</td><td></td><td></td><td>Y</td><td>GAN</td></tr><tr><td>Liang et al. 2018</td><td>Y</td><td>Y</td><td></td><td>Y</td><td>Y</td><td></td><td>Non-neural model</td></tr></table>

Table 10: An Overview of the Existing DG Works

**Example 1**

Context: Omitted. (See Appendix)

Question:
- Why did Mr. King want to send Henry away?

Answer:
- Because Henry was too lazy.

BDG:
- $d_{1}$: Because Henry didn't want to go.
- $d_{2}$: Because Henry didn't want to go to the bookstore.
- $d_{3}$: Because Henry didn't want to go out.

BDG$_{\mathrm{EM}}$:
- $d_{1}$: Because Henry didn't want to go.
- $d_{2}$: Because Henry wanted to be rich.
- $d_{3}$: Because Henry wanted to be a clever man.

**Example 2**

Context: Omitted. (See Appendix)

Question:
- Which of the following women would look most attractive?

Answer:
- A short red-haired woman who wears a purple hat.

BDG:
- $d_{1}$: A young woman who wears a white hat.
- $d_{2}$: A woman who wears a white hat.

BDG$_{\mathrm{EM}}$:
- $d_{1}$: A short black woman with big, round faces.
- $d_{2}$: A young woman who doesn't like a white hat.
- $d_{3}$: A little woman who wears a pink hat.

Table 8: Qualitative Examination by Case Study
# 6 Conclusion

We present a state-of-the-art neural model for DG based on a pre-trained transformer. We introduce two techniques, Answer Negative Regularization and multi-task training with a Parallel MLM, to boost DG performance. In addition, we introduce a BDG ensemble with an entropy maximization mechanism that enhances DG quality by leveraging a reading comprehension model. In our experimental evaluation, the proposed models outperform the existing best performing models and advance the state-of-the-art result to 39.81 (BLEU 1 score).
<table><tr><td></td><td>BLEU 1</td><td>BLEU 2</td><td>BLEU 3</td><td>BLEU 4</td><td>ROUGE L</td></tr><tr><td>PM(γ=1)</td><td>36.97</td><td>22.07</td><td>14.82</td><td>10.50</td><td>32.64</td></tr><tr><td>PM(γ=2)</td><td>38.45</td><td>23.21</td><td>15.81</td><td>11.36</td><td>33.18</td></tr><tr><td>PM(γ=3)</td><td>39.23</td><td>24.27</td><td>17.04</td><td>12.78</td><td>33.82</td></tr><tr><td>PM(γ=4)</td><td>39.22</td><td>24.24</td><td>17.08</td><td>12.95</td><td>34.05</td></tr><tr><td>PM(γ=5)</td><td>39.74</td><td>24.50</td><td>17.29</td><td>13.09</td><td>34.11</td></tr><tr><td>PM(γ=6)</td><td>39.81</td><td>24.81</td><td>17.66</td><td>13.56</td><td>34.01</td></tr><tr><td>PM(γ=7)</td><td>39.37</td><td>24.13</td><td>17.09</td><td>13.07</td><td>33.45</td></tr><tr><td>AN+PM(γ=1)</td><td>37.49</td><td>22.08</td><td>13.73</td><td>10.44</td><td>32.40</td></tr><tr><td>AN+PM(γ=2)</td><td>38.25</td><td>22.81</td><td>15.33</td><td>10.91</td><td>32.99</td></tr><tr><td>AN+PM(γ=3)</td><td>38.71</td><td>23.54</td><td>16.26</td><td>12.04</td><td>33.82</td></tr><tr><td>AN+PM(γ=4)</td><td>38.84</td><td>23.70</td><td>16.57</td><td>12.46</td><td>33.53</td></tr><tr><td>AN+PM(γ=5)</td><td>39.19</td><td>23.97</td><td>16.96</td><td>12.92</td><td>33.67</td></tr><tr><td>AN+PM(γ=6)</td><td>39.58</td><td>24.23</td><td>17.11</td><td>13.11</td><td>33.38</td></tr><tr><td>AN+PM(γ=7)</td><td>39.52</td><td>24.29</td><td>17.28</td><td>13.28</td><td>33.40</td></tr></table>

Table 9: Performance Comparison on Token Scores with Different $\gamma$ Settings
# Acknowledgement

This work was supported by the Ministry of Science and Technology, Taiwan, under projects No. 109-2221-E-005-058-MY3 and 107-2221-E-005-064-MY2.
# References

Jun Araki, Dheeraj Rajagopal, Sreecharan Sankaranarayanan, Susan Holm, Yukari Yamakawa, and Teruko Mitamura. 2016. Generating questions and multiple-choice answers using semantic analysis of texts. In Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers, pages 1125-1136.

Ying-Hong Chan and Yao-Chung Fan. 2019. A recurrent BERT-based model for question generation. In Proceedings of the 2nd Workshop on Machine Reading for Question Answering, pages 154-162.

Li Dong, Nan Yang, Wenhui Wang, Furu Wei, Xiaodong Liu, Yu Wang, Jianfeng Gao, Ming Zhou, and Hsiao-Wuen Hon. 2019. Unified language model pre-training for natural language understanding and generation. In Advances in Neural Information Processing Systems, pages 13042-13054.

Yifan Gao, Lidong Bing, Piji Li, Irwin King, and Michael R. Lyu. 2019. Generating distractors for reading comprehension questions from real examinations. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 6423-6430.

Qi Guo, Chinmay Kulkarni, Aniket Kittur, Jeffrey P. Bigham, and Emma Brunskill. 2016. Questimator: Generating knowledge assessments for arbitrary topics. In Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence (IJCAI'16). AAAI Press.

Girish Kumar, Rafael E. Banchs, and Luis Fernando D'Haro. 2015. RevUP: Automatic gap-fill question generation from educational texts. In Proceedings of the Tenth Workshop on Innovative Use of NLP for Building Educational Applications, pages 154-161.

Guokun Lai, Qizhe Xie, Hanxiao Liu, Yiming Yang, and Eduard Hovy. 2017. RACE: Large-scale reading comprehension dataset from examinations. arXiv preprint arXiv:1704.04683.

Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, and Radu Soricut. 2019. ALBERT: A lite BERT for self-supervised learning of language representations. arXiv preprint arXiv:1909.11942.

Chen Liang, Xiao Yang, Neisarg Dave, Drew Wham, Bart Pursel, and C. Lee Giles. 2018. Distractor generation for multiple choice questions using learning to rank. In Proceedings of the Thirteenth Workshop on Innovative Use of NLP for Building Educational Applications, pages 284-290.

Chen Liang, Xiao Yang, Drew Wham, Bart Pursel, Rebecca Passonneau, and C. Lee Giles. 2017. Distractor generation with generative adversarial nets for automatically creating fill-in-the-blank questions. In Proceedings of the Knowledge Capture Conference, pages 1-4.

Chin-Yew Lin. 2004. ROUGE: A package for automatic evaluation of summaries. In Text Summarization Branches Out, pages 74-81.

Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A robustly optimized BERT pretraining approach. arXiv preprint arXiv:1907.11692.

Ruslan Mitkov, Le An Ha, and Nikiforos Karamanis. 2006. A computer-aided environment for generating multiple-choice test items. Natural Language Engineering, 12(2):177-194.

Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. BLEU: a method for automatic evaluation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pages 311-318. Association for Computational Linguistics.

Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. 2018. Improving language understanding by generative pre-training. URL https://s3-us-west-2.amazonaws.com/openai-assets/research-covers/language-unsupervised/language_understanding_paper.pdf.

Siyu Ren and Kenny Q. Zhu. 2020. Knowledge-driven distractor generation for cloze-style multiple choice questions. arXiv preprint arXiv:2004.09853.

Irene Solaiman, Miles Brundage, Jack Clark, Amanda Askell, Ariel Herbert-Voss, Jeff Wu, Alec Radford, and Jasmine Wang. 2019. Release strategies and the social impacts of language models. arXiv preprint arXiv:1908.09203.

Eiichiro Sumita, Fumiaki Sugaya, and Seiichi Yamamoto. 2005. Measuring non-native speakers' proficiency of English by using a test with automatically-generated fill-in-the-blank questions. In Proceedings of the Second Workshop on Building Educational Applications Using NLP, pages 61-68.

Johannes Welbl, Nelson F. Liu, and Matt Gardner. 2017. Crowdsourcing multiple choice science questions. arXiv preprint arXiv:1707.06209.

Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, and Jamie Brew. 2019. HuggingFace's Transformers: State-of-the-art natural language processing. arXiv preprint arXiv:1910.03771.

Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V. Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, et al. 2016. Google's neural machine translation system: Bridging the gap between human and machine translation. arXiv preprint arXiv:1609.08144.

Chak Yan Yeung, John S. Y. Lee, and Benjamin K. Tsou. 2019. Difficulty-aware distractor generation for gap-fill items. In Proceedings of the 17th Annual Workshop of the Australasian Language Technology Association, pages 159-164.

Xiaorui Zhou, Senlin Luo, and Yunfang Wu. 2019. Co-attention hierarchical network: Generating coherent long distractors for reading comprehension.
# Appendix
<table><tr><td>Content</td><td>The building is shaking. A woman with a baby in her arms is trying to open the door, but fails. Finding no way, she rushes into her bedroom and there they survive the earthquake. In a factory building, as the workshop floor swings under the terrible shaking, workers run for safety. Some hide under the machines and survive, but others who try to run outside are killed by the falling ceilings. These scenes, played by actors and actresses, are from a film of science education Making a Split Second Decision shown in 1998 on China Central TV in memory of Tangshan Earthquake. By studying actual cases in the earthquake areas and scientific experiments, experts find that buildings remain untouched for the first 12 seconds of an earthquake. In this short time, one has the best chance of surviving an earthquake by staying near the inside walls, in bedrooms and under beds, experts concluded in the film. "Earthquakes seem to catch the lives of those who run," said many survivors in the earthquake areas, describing how their friends were killed on the doorways or along the stair steps as they tried to get out of the building. Their advice was proved in the film, "Take a hiding-place where you are rather than run, unless you are sure you can reach a safe open place in ten seconds."</td></tr><tr><td>Question</td><td>The workers who try to run outside the building die because?</td></tr><tr><td>Answer</td><td>They don't have enough time to run outside.</td></tr><tr><td>Distractor</td><td>They don't know how to get out of the building.</td></tr></table>

Table 11: BDG showcase

<table><tr><td>Content</td><td>Henry found work in a bookstore after he finished middle school. He wouldn’t do anything but wanted to get rich. Mr.King thought he was too lazy and was going to send him away. Henry was afraid and had to work hard. It was a cold morning. It was snowing and there was thin ice on the streets. Few people went to buy the books and the young man had nothing to do. He hated to read, so he watched the traffic. Suddenly he saw a bag fall off a truck and it landed by the other side of the street. It must be full of expensive things. Henry said to himself. I have to get it, or others will take it away. He went out of the shop and ran across the street. A driver saw him and began to whistle, but he didn’t hear it and went on running. The man drove aside, hit a big tree and was hurt in the accident. Two weeks later Henry was taken to court. A judge asked if he heard the whistle when he was running across the street. He said that something was wrong with his ears and he could hear nothing. “But you’ve heard me this time.” said the judge. “Oh, I’m sorry. Now I can hear with one ear.” “Cover the ear with your hand and listen to me with your deaf one. Well, can you hear me?” “No, I can’t, Sir.”</td></tr><tr><td>Question</td><td>Why did Mr.King want to send Henry away?</td></tr><tr><td>Answer</td><td>Because Henry was too lazy.</td></tr><tr><td rowspan="3">BDG</td><td>Because Henry didn’t want to go.</td></tr><tr><td>Because Henry didn’t want to go out.</td></tr><tr><td>Because Henry didn’t want to go to the bookstore.</td></tr><tr><td rowspan="3">BDG ensemble</td><td>Because Henry didn’t want to go.</td></tr><tr><td>Because Henry wanted to be rich.</td></tr><tr><td>Because Henry wanted to be a clever man.</td></tr></table>
|
| 411 |
+
|
| 412 |
+
Table 12: Context for Example 1
|
| 413 |
+
|
| 414 |
+
<table><tr><td>Content</td><td>Most of the time, people wear hats to protect themselves from weather conditions. Hats are also worn to show politeness and as signs of social position. But nowadays, hats, especially women's hats, are much more than that. More exactly, hats have changed into fashion and style symbols by many movie stars. What's more, people now consider many different features when choosing even a simple hat. Many designers point out that, when choosing the right hat, it's important to consider the color of your skin as well as your hair, your height, and the shape of your face. First of all, the color of the hat should match the color of your skin and hair. For instance, black hats should be avoided if you are dark skinned. If a purple hat is placed on top of red hair, one will look as attractive as a summer flower. Second, the height of the hat is also an important point. Tall women should not go for hats with tall crowns, just as short women should choose hats with upturned brims to give the look of height. Third, and most importantly, the shape of the face decides the kind of hat one should pick. A small, gentle hat that fits the head looks good on a small face. However, women with big, round faces should choose a different style. As the saying goes, 'Fine feathers make fine birds.' Å good hat can not only help your dress but also support your features, so why not choose the best possible one next time you want to be in public?</td></tr><tr><td>Question</td><td>According to the article, which of the following women would look most attractive?</td></tr><tr><td>Answer</td><td>A short red-haired woman who wears a purple hat.</td></tr><tr><td rowspan="3">BDG</td><td>A young woman who wears a white hat.</td></tr><tr><td>A young woman who doesn't like a white hat.</td></tr><tr><td>A woman who wears a white hat.</td></tr><tr><td rowspan="3">BDG ensemble</td><td>A short black woman with big, round faces.</td></tr><tr><td>A young woman who doesn't like a white hat.</td></tr><tr><td>A little woman who wears a pink hat.</td></tr></table>
|
| 415 |
+
|
| 416 |
+
Table 13: Context for Example 2
|
| 417 |
+
|
| 418 |
+
<table><tr><td>Content</td><td>Memory, they say, is a matter of practice and exercise. If you have the wish and really made a conscious effort, then you can quite easily improve your ability to remember things. But even if you are successful, there are times when your memory seems to play tricks on you. Sometimes you remember things that really did not happen. One morning last week, for example, I got up and found that I had left the front door unlocked all night, yet I clearly remember locking it carefully the night before. Memory "trick" work the other way as well. Once in a while you remember not doing something, and then find out that you did. One day last month, for example, I was sitting in a barber shop waiting for my turn to get a haircut, and suddenly I realized that I had got a haircut two days before at the barber shop across the street from my office. We always seem to find something funny and amusing in incidents caused by people's forgetfulness or absent-mindedness. Stories about absent-minded professors have been told for years, and we never got tired of hearing new ones. Unfortunately, however, absent-mindedness is not always funny. There are times when "trick" of our memory can cause us great trouble.</td></tr><tr><td>Question</td><td>Which of the following statements is true according to the passage?</td></tr><tr><td>Answer</td><td>One night the writer forgot to lock the front door.</td></tr><tr><td rowspan="2">BDG</td><td>The writer couldn't find a hair cut in the barber shop.</td></tr><tr><td>The writer couldn't find a hair cut in the shop.</td></tr><tr><td rowspan="2">BDG ensemble</td><td>The writer didn't want to open the front door.</td></tr><tr><td>The writer couldn't find the reason why he left the front door.</td></tr></table>
|
| 419 |
+
|
| 420 |
+
Table 14: Yet another example for BDG multiple distractor generation

abertbaseddistractorgenerationschemewithmultitaskingandnegativeanswertrainingstrategies/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:148c1062046c3862575af78830edabd7627596012ac987bc4e51a29b0085fc31
size 1186766

abertbaseddistractorgenerationschemewithmultitaskingandnegativeanswertrainingstrategies/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a5ce0a1b5c83a9f61e110dd8dd32f98b9ca3ec3c183b1ab248b0efb283b241cb
size 406341

abstractivemultidocumentsummarizationviajointlearningwithsingledocumentsummarization/a0c61489-b7ee-4e8a-a1ea-343af81a791f_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:74377c75fdda39ae84bab66ebf49c361911547aa5cc39f951dff15e479622194
size 71406

abstractivemultidocumentsummarizationviajointlearningwithsingledocumentsummarization/a0c61489-b7ee-4e8a-a1ea-343af81a791f_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7b5b9a772403fd5fdf2cfe53037bfb6f2aa870bbdf4d0ed63262e767b6f4655e
size 84299

abstractivemultidocumentsummarizationviajointlearningwithsingledocumentsummarization/a0c61489-b7ee-4e8a-a1ea-343af81a791f_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:03093f8b19278d719fee3fdfb3eb500e51567d8182e9eea8d39d417698c73823
size 910897

abstractivemultidocumentsummarizationviajointlearningwithsingledocumentsummarization/full.md
ADDED
@@ -0,0 +1,340 @@

# Abstractive Multi-Document Summarization via Joint Learning with Single-Document Summarization

Hanqi Jin, Xiaojun Wan

Center for Data Science, Peking University

Wangxuan Institute of Computer Technology, Peking University

The MOE Key Laboratory of Computational Linguistics, Peking University

{jinhanqi,wanxiaojun}@pku.edu.cn

# Abstract

Single-document and multi-document summarization are very closely related in both task definition and solution method. In this work, we propose to improve neural abstractive multi-document summarization by jointly learning an abstractive single-document summarizer. We build a unified model for single-document and multi-document summarization by fully sharing the encoder and decoder and utilizing a decoding controller to aggregate the decoder's outputs for multiple input documents. We evaluate our model on two multi-document summarization datasets: Multi-News and DUC-04. Experimental results show the efficacy of our approach, which substantially outperforms several strong baselines. We also verify the helpfulness of single-document summarization to the abstractive multi-document summarization task.

# 1 Introduction

Document summarization aims at producing a fluent, condensed summary for a given document or document set. It involves identifying important information and filtering out redundant information from the input sources. While single-document summarization takes a single source document as input, multi-document summarization requires producing a summary from a cluster of thematically related documents. There are two primary methodologies for document summarization: extractive and abstractive. Extractive methods directly select important sentences from the original documents; they are relatively simple but suffer from information redundancy and incoherence between sentences. Abstractive methods can generate new words, phrases, and sentences, and hence produce summaries with higher readability and conciseness. In this paper, we focus on abstractive document summarization.

Empowered by large parallel datasets automatically harvested from online news websites, sequence-to-sequence learning has shown promising results on abstractive single-document summarization (See et al., 2017; Paulus et al., 2018; Tan et al., 2017; Celikyilmaz et al., 2018). Compared with single-document summarization, annotated multi-document summarization datasets are often scarce. Several works have explored adapting the neural encoder-decoder model trained for single-document summarization to multi-document summarization. Zhang et al. (2018) add a document set encoder to extend a neural abstractive model trained on a large-scale single-document summarization corpus to the multi-document summarization task. Lebanoff et al. (2018) incorporate the maximal marginal relevance method into a neural encoder-decoder model trained for single-document summarization to address information redundancy in multi-document summarization.

Single-document and multi-document summarization are very closely related in both task definition and solution method (Wan, 2010). Both tasks need to deal with document-level input, identify the important content of documents, and paraphrase the important information to generate the summary; the main difference is that multi-document summarization involves summarizing multiple input documents. Since the two tasks are so closely related, it is promising to learn them jointly. Compared with single-document summarization, multi-document summarization needs to handle multiple input documents. A simple method is to concatenate the multiple documents into a long flat text and treat the task as a long sequence-to-sequence problem. However, this blurs the boundaries between documents and loses the hierarchy within the document cluster. It is natural instead to regard multi-document summarization as a two-stage process of summarizing every single document and then merging the multiple summaries. Nevertheless, this pipeline is rather simplistic, and it is difficult to utilize a multi-document summarization corpus to train the single-document summarization model. Furthermore, the synthesis of multiple summaries involves eliminating redundant parts and organizing related paragraphs or sentences, which are also challenges to be solved.

In this work, we propose a joint learning approach that addresses these issues and improves neural abstractive multi-document summarization by using a single-document summarization corpus. Our approach first uses a shared document encoder to encode each document in the document set, then uses a shared decoder to predict the word probabilities for each document, and finally applies a decoding controller to aggregate all output probabilities from the summary decoder to make the final prediction at each decoding step. The shared encoder and decoder are jointly trained on the single-document summarization data. In this way, we unify single-document and multi-document summarization in one architecture and make better use of both corpora, so that both tasks can benefit from joint learning, especially the multi-document summarization task.

We evaluate our approach on the benchmark multi-document summarization datasets, Multi-News and DUC-04, where it brings substantial improvements over several strong baselines for multi-document summarization. We leverage CNN/DailyMail, a single-document summarization dataset, to perform joint learning with Multi-News. We also test the performance on the CNN/DailyMail test set, where joint learning likewise brings a certain improvement over the single-document summarization baselines.

In summary, we make the following contributions in this paper:

- To the best of our knowledge, we are the first to explore joint learning for neural abstractive single-document and multi-document summarization.
- We propose a unified model that fully shares the encoder and decoder and utilizes a decoding controller to aggregate the decoder's outputs for multiple input documents.
- Experimental results show that our approach substantially outperforms several strong baselines, and single-document summarization is verified to be very helpful to neural abstractive multi-document summarization. Our code is publicly available at https://github.com/zhongxia96/MDS-and-SDS.

# 2 Related Work

# 2.1 Multi-Document Summarization

Methods for multi-document summarization can generally be categorized as extractive or abstractive. Extractive methods produce a summary by extracting and merging sentences from the input documents, while abstractive methods generate a summary using arbitrary words and expressions based on an understanding of the documents. Due to the lack of available training data, most previous multi-document summarization methods were extractive (Erkan and Radev, 2004; Christensen et al., 2013; Yasunaga et al., 2017). Recently, two multi-document summarization datasets have been proposed: one for very long inputs, aimed at generating Wikipedia articles (Liu et al., 2018), and another dedicated to generating comprehensive summaries of multiple real-time news articles (Fabbri et al., 2019). Several works have begun to explore abstractive multi-document summarization. Liu et al. (2018) concatenated multiple source documents into a long flat text and modeled multi-document summarization as a long sequence-to-sequence task. Liu and Lapata (2019) represented cross-document relationships via an attention mechanism that allows sharing information, as opposed to simply concatenating text spans and processing them as a flat sequence. Fabbri et al. (2019) incorporated MMR into a hierarchical pointer-generator network to address information redundancy in multi-document summarization. The above works were all trained and tested on multi-document summarization corpora.

# 2.2 Adaptation Method from Single to Multi-Document Summarization

Since neural abstractive models have achieved promising results on single-document summarization (See et al., 2017; Paulus et al., 2018; Gehrmann et al., 2018; Celikyilmaz et al., 2018), some works trained abstractive summarization models on a single-document dataset and adjusted the model to adapt it to the multi-document summarization task. Zhang et al. (2018) added a document set encoder into the single-document summarization framework and tuned the pre-trained model on the multi-document summarization dataset. Lebanoff et al. (2018) combined an extractive summarization algorithm (MMR) for sentence extraction to reweigh the original sentence importance distribution learned by the abstractive single-document summarization model. In this work, we propose to jointly learn the two summarization tasks in a unified model, which utilizes a shared encoder-decoder to summarize each document and uses a decoding controller to aggregate all decoder outputs. Compared with the above adaptation methods, our method can make better use of multi-document and single-document corpora and improves the effectiveness of single-document summarization at the same time.

Figure 1: The overview of our model.

# 3 Methodology

# 3.1 Overview and Notations

Multi-document summarization takes a document cluster $D = \{D_{1},D_{2},\ldots ,D_{I}\}$ as input and produces the summary $Y$, where $I$ is the number of documents. Each document $D_{i} = (x_{i,1},x_{i,2},\dots,x_{i,N_{i}})$ is a sequence of $N_{i}$ words, and $Y = (y_{1},y_{2},\dots,y_{M})$ is a sequence of $M$ words. Compared with multi-document summarization, single-document summarization has only one input document. To unify the notation, single-document summarization is regarded as the special case $I = 1$.

As illustrated in Figure 1, our model consists of a document encoder, a summary decoder, and a decoding controller. The different documents in multi-document summarization share the document encoder and summary decoder, and single-document summarization also shares the document encoder and summary decoder with multi-document summarization. A decoding controller is applied to aggregate the outputs of the summary decoder for multiple input documents.

The shared document encoder reads each input document $D_{i}$ and builds the contextual-level representation $C_i$:

$$
C_i = \mathrm{encoder}(D_i) \tag{1}
$$

In each decoding step $t$, the shared summary decoder produces the vocabulary distribution of the next word given the previously predicted words and each input document $D_{i}$:

$$
P_i^t = \mathrm{decoder}\left(C_i, y_{1:t-1}\right) \tag{2}
$$

Note that for multi-document summarization, the same sequence of previous words $y_{1:t-1}$ (i.e., the partial summary) is used for decoding for every one of the multiple input documents.

Since single-document summarization only summarizes one input document, the summary decoder can make the final prediction directly from its output vocabulary distribution. For multi-document summarization, a decoding controller is applied to aggregate the multiple vocabulary distributions produced by the summary decoder for the multiple input documents:

$$
P_f^t = \sum_{i=1}^{I} P_i^t z_i^t \tag{3}
$$

Here $z_{i}^{t}$ is the importance weight for each of the multiple vocabulary distributions in the $t$-th step.

The following sections introduce the document encoder, the summary decoder, and the decoding controller, respectively.

# 3.2 Document Encoder

The document encoder reads an input document $D_{i}$ and constructs its contextual-level representation. For multi-document summarization, multiple input documents can be processed in parallel. This part is the same as the Transformer encoder (Vaswani et al., 2017), so we only give a brief introduction. The document encoder is composed of a stack of $L$ identical layers. Each layer has two sub-layers: the first is a multi-head self-attention mechanism, and the second is a position-wise fully connected feed-forward network. A residual connection (He et al., 2016) is employed around each of the two sub-layers, followed by layer normalization (Ba et al., 2016).

Tokens of each input document are first represented by word embeddings. Let $e_{i,j}$ denote the embedding assigned to word $x_{i,j}$. Since the Transformer is a non-recurrent model, we add a positional embedding $p_j$ to the word embedding to indicate the position of the word in the document; the input representation is obtained by simply adding the two: $w_{i,j} = e_{i,j} + p_j$. We take $\{w_{i,1}, w_{i,2}, \ldots, w_{i,N_i}\}$ as the input to the document encoder. For convenience, we denote the input of the first layer as $h^0$ and the output of the $l$-th layer as $h^l$. The multi-head self-attention sub-layer takes the output of the previous layer as input to construct contextual-level representations, while the FFN sub-layer further transforms the representations.

$$
\tilde{h} = \mathrm{LayerNorm}\left(h^{l-1} + \mathrm{MHAtt}\left(h^{l-1}, h^{l-1}\right)\right)
$$

$$
h^l = \mathrm{LayerNorm}(\tilde{h} + \mathrm{FFN}(\tilde{h})) \tag{4}
$$

The final output $h^L$ is fed to the summary decoder, and it is also fed to the decoding controller for multi-document summarization. For convenience, we denote the output for document $D_i$ as $C_i$.
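
To make the layer structure concrete, here is a minimal sketch of one shared encoder layer in PyTorch. It only instantiates Eq. 4 under the paper's reported sizes ($d_{model}=512$, 4 heads, 1024 feed-forward units); the class and variable names are our own illustration, not the authors' released code.

```python
# Illustrative sketch of one shared encoder layer (Eq. 4), assuming PyTorch >= 1.9.
import torch
import torch.nn as nn

class EncoderLayer(nn.Module):
    def __init__(self, d_model=512, n_heads=4, d_ff=1024, dropout=0.1):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, n_heads,
                                               dropout=dropout, batch_first=True)
        self.ffn = nn.Sequential(nn.Linear(d_model, d_ff), nn.ReLU(),
                                 nn.Linear(d_ff, d_model))
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)

    def forward(self, h):                      # h: (batch, N_i, d_model)
        attn_out, _ = self.self_attn(h, h, h)  # MHAtt(h^{l-1}, h^{l-1})
        h_tilde = self.norm1(h + attn_out)     # residual + LayerNorm
        return self.norm2(h_tilde + self.ffn(h_tilde))
```

Stacking $L = 6$ such layers and running them over each document independently yields the representations $C_i$ described above.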

# 3.3 Summary Decoder

In each decoding step, the summary decoder takes the decoded subsequence $(y_{1},y_{2},\dots ,y_{t - 1})$ as input and predicts the probability distribution of generating the next word for each input document $D_{i}$. Similar to the document encoder, the summary decoder is also a stack of $L$ identical layers. Each layer consists of three sub-layers: a masked multi-head self-attention mechanism, a multi-head cross-attention mechanism over the output of the encoder stack, and a position-wise feed-forward network.

We also add positional embeddings to the word embeddings in the same way as in the document encoder. Let $d^{l}$ denote the output of the $l$-th layer of the summary decoder, and $d^{0}$ the input of the first layer. The masked multi-head self-attention sub-layer encodes the information of the decoded subsequence. Its output is fed to the cross-attention sub-layer and the feed-forward network. The cross-attention sub-layer performs multi-head attention over the output $C_{i}$ of the document encoder.

$$
\tilde{d} = \mathrm{LayerNorm}\left(d^{l-1} + \mathrm{MHAtt}\left(d^{l-1}, d^{l-1}\right)\right)
$$

$$
g = \mathrm{LayerNorm}(\tilde{d} + \mathrm{MHAtt}(\tilde{d}, C_i))
$$

$$
d^l = \mathrm{LayerNorm}(g + \mathrm{FFN}(g)) \tag{5}
$$

Let $U_{i}^{t}$ denote the output of the $L$-th layer for document $D_{i}$ at position $t$.

The output $U_{i}^{t}$ is passed through a softmax layer to calculate the generation distribution of the next word over the target vocabulary:

$$
\hat{P}_i^t = \mathrm{softmax}\left(U_i^t W_g + b_g\right) \tag{6}
$$

where $W_{g}\in \mathbb{R}^{d_{model}\times d_{vocab}}$, $b_{g}\in \mathbb{R}^{d_{vocab}}$, and $d_{vocab}$ is the size of the target vocabulary. To tackle the problem of out-of-vocabulary (OOV) words, we compute the copy attention $\varepsilon_i^t$ between $U_{i}^{t}$ and the input representations $C_i$ to allow copying words from the source text, and obtain the copy distribution (Gu et al., 2016):

$$
\varepsilon_i^t = \mathrm{softmax}(U_i^t C_i^\top)
$$

$$
\tilde{P}_i^t = \sum_{j=1}^{N_i} \varepsilon_{i,j}^t o_{i,j} \tag{7}
$$

where $o_{i,j}$ is the one-hot indicator vector for $w_{i,j}$.

The generation probability $\eta_i^t\in [0,1]$ is calculated from the decoder output $U_{i}^{t}$:

$$
\eta_i^t = \sigma\left(U_i^t W_\eta + b_\eta\right) \tag{8}
$$

where $W_{\eta}\in \mathbb{R}^{d_{model}\times 1}$ and $b_{\eta}\in \mathbb{R}$. The overall distribution for document $D_{i}$ is obtained by combining the two distributions with $\eta_i^t$:

$$
P_i^t = \eta_i^t \, \hat{P}_i^t + (1 - \eta_i^t) \, \tilde{P}_i^t \tag{9}
$$
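
The generation/copy mixture of Eqs. 6-9 can be written compactly. The sketch below is a hedged illustration in PyTorch: `U_t`, `C_i`, `src_ids`, and the projection parameters are assumed tensor names for this example, not identifiers from the released code.

```python
# Hedged sketch of the generation/copy mixture (Eqs. 6-9).
import torch
import torch.nn.functional as F

def copy_mixture(U_t, C_i, src_ids, W_g, b_g, W_eta, b_eta, vocab_size):
    """U_t: (B, d_model) decoder output for document D_i at step t;
    C_i: (B, N_i, d_model) encoder states; src_ids: (B, N_i) long tensor
    of source-token vocabulary ids."""
    p_gen = F.softmax(U_t @ W_g + b_g, dim=-1)                  # Eq. 6
    eps = F.softmax(torch.einsum("bd,bnd->bn", U_t, C_i), -1)   # copy attention
    p_copy = torch.zeros(U_t.size(0), vocab_size,
                         device=U_t.device).scatter_add_(1, src_ids, eps)  # Eq. 7
    eta = torch.sigmoid(U_t @ W_eta + b_eta)                    # Eq. 8, shape (B, 1)
    return eta * p_gen + (1.0 - eta) * p_copy                   # Eq. 9
```

The `scatter_add_` call realizes Eq. 7 without materializing one-hot vectors: each copy-attention weight is added directly to the vocabulary slot of the corresponding source token.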

# 3.4 Decoding Controller

Multi-document summarization requires producing a summary for a cluster of thematically related documents. While the summary decoder predicts a vocabulary distribution for each input document, the decoding controller aggregates the multiple vocabulary distributions to predict the final vocabulary distribution for multi-document summarization. Figure 2 shows an example. To better aggregate the multiple vocabulary distributions, the controller needs to grasp the theme of the document cluster. We first use attention pooling over the document encoder outputs to obtain the corresponding document representation, and adopt a bidirectional LSTM (Hochreiter and Schmidhuber, 1997; Schuster and Paliwal, 1997) to encode the multiple document representations in the document cluster. Then, we take the output of the bidirectional LSTM as the initial state of another unidirectional LSTM, which is used to calculate the weights with which the next word comes from each document.

Attention Pooling The attention pooling operation is applied over the contextual-level representations $C_i = (c_{i,1}, c_{i,2}, \dots, c_{i,N_i})$ to obtain a fixed-length representation $\hat{c}_i$ for document $D_i$. We first transform each input vector $c_{i,j}$ into an attention score $a_{i,j}$ and a value vector $v_{i,j}$. Then we calculate a probability distribution $\hat{a}_i$ over the words within document $D_i$ based on the attention scores:

$$
a_{i,j} = c_{i,j} W_a
$$

$$
v_{i,j} = c_{i,j} W_v \tag{10}
$$

$$
\hat{a}_{i,j} = \frac{\exp(a_{i,j})}{\sum_{k=1}^{N_i} \exp(a_{i,k})}
$$

where $W_{a}\in \mathbb{R}^{d_{model}\times 1}$ and $W_{v}\in \mathbb{R}^{d_{model}\times d_{model}}$. Finally, we obtain the document vector $\hat{c}_i$ by weighing the value vectors:

$$
\hat{c}_i = \sum_{j=1}^{N_i} \hat{a}_{i,j} v_{i,j} \tag{11}
$$
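
For concreteness, here is a small, assumed PyTorch sketch of the attention pooling of Eqs. 10-11; `W_a` scores each token and `W_v` produces the value vectors, while the module name is ours.

```python
# Assumed sketch of attention pooling (Eqs. 10-11).
import torch
import torch.nn as nn

class AttentionPooling(nn.Module):
    def __init__(self, d_model=512):
        super().__init__()
        self.W_a = nn.Linear(d_model, 1, bias=False)        # a_{i,j} = c_{i,j} W_a
        self.W_v = nn.Linear(d_model, d_model, bias=False)  # v_{i,j} = c_{i,j} W_v

    def forward(self, C_i):  # C_i: (batch, N_i, d_model)
        a_hat = torch.softmax(self.W_a(C_i).squeeze(-1), dim=-1)  # (batch, N_i)
        v = self.W_v(C_i)                                         # (batch, N_i, d_model)
        return torch.einsum("bn,bnd->bd", a_hat, v)               # \hat{c}_i, Eq. 11
```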

A bidirectional LSTM is adopted to further encode the document representations $\{\hat{c}_1,\hat{c}_2,\dots ,\hat{c}_I\}$. The forward LSTM reads the document representations from left to right and produces a sequence of hidden states $\left(\overrightarrow{f}_1,\overrightarrow{f}_2,\dots ,\overrightarrow{f}_I\right)$. The backward LSTM reads the document representations in reverse, from right to left, and produces another sequence of hidden states $\left(\overleftarrow{f}_1,\overleftarrow{f}_2,\dots ,\overleftarrow{f}_I\right)$. We add the last forward hidden state $\overrightarrow{f}_I$ and backward hidden state $\overleftarrow{f}_{1}$ to form the output $r$ of the bidirectional LSTM:

$$
r = \overleftarrow{f}_1 + \overrightarrow{f}_I \tag{12}
$$

Figure 2: The decoding controller weighs the multiple output distributions to predict the next word. If we simply average the vocabulary distributions, we get the word "is"; by calculating and using the weights through the decoding controller, we get the correct word "lives".

The output $r$ is used as the initial state of another unidirectional LSTM. In decoding step $t$, the unidirectional LSTM takes the previous word $y_{t-1}$ as input and produces the new state $s_t$:

$$
s_t = \mathrm{LSTM}\left(s_{t-1}, y_{t-1}\right) \tag{13}
$$

We calculate the weights $z^t$ from $s_t$ and the decoder outputs $U^t = \{U_1^t, U_2^t, \dots, U_I^t\}$:

$$
z^t = \mathrm{softmax}\left(U^t W_z s_t^\top\right) \tag{14}
$$

where $W_{z}\in \mathbb{R}^{d_{model}\times d_{model}}$.

The final vocabulary distribution for multi-document summary generation is the interpolation of all output distributions:

$$
P_f^t = \sum_{i=1}^{I} P_i^t z_i^t \tag{15}
$$
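
The controller (Eqs. 12-15) can be sketched end to end as follows. This is an illustrative assumption in PyTorch, not the released code: a BiLSTM summarizes the pooled document vectors into the initial state, a unidirectional LSTM cell tracks the partial summary, and the resulting weights $z^t$ mix the per-document distributions.

```python
# Illustrative sketch of the decoding controller (Eqs. 12-15).
import torch
import torch.nn as nn

class DecodingController(nn.Module):
    def __init__(self, d_model=512):
        super().__init__()
        self.bilstm = nn.LSTM(d_model, d_model, bidirectional=True,
                              batch_first=True)
        self.cell = nn.LSTMCell(d_model, d_model)
        self.W_z = nn.Linear(d_model, d_model, bias=False)

    def init_state(self, doc_vecs):             # doc_vecs: (B, I, d_model)
        out, _ = self.bilstm(doc_vecs)
        d = out.size(-1) // 2
        r = out[:, -1, :d] + out[:, 0, d:]      # Eq. 12: fwd f_I + bwd f_1
        return (r, torch.zeros_like(r))         # initial (h, c) of the cell

    def step(self, y_prev_emb, state, U_t, P_t):
        # U_t: (B, I, d_model) decoder outputs; P_t: (B, I, V) distributions
        h, c = self.cell(y_prev_emb, state)                        # Eq. 13
        z = torch.softmax(torch.einsum("bid,bd->bi",
                                       self.W_z(U_t), h), dim=-1)  # Eq. 14
        P_f = torch.einsum("bi,biv->bv", z, P_t)                   # Eq. 15
        return P_f, (h, c)
```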

# 3.5 Objective Function

We jointly learn single-document and multi-document summarization in a unified model. Our goal is to maximize the probability of the output summary $Y$ given a single document $S$ or a document set $D$. We use $\mathcal{T}_s$ to denote the single-document training set and $\mathcal{T}_m$ the multi-document training set, and compute the negative log-likelihood for single-document and multi-document summarization, respectively:

$$
L_s = -\frac{1}{|\mathcal{T}_s|} \sum_{(S, Y) \in \mathcal{T}_s} \log P(Y \mid S)
$$

$$
L_m = -\frac{1}{|\mathcal{T}_m|} \sum_{(D, Y) \in \mathcal{T}_m} \log P(Y \mid D) \tag{16}
$$

For simplicity, we optimize the sum of the above losses.
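
As a minimal sketch (ours, under the assumption that the model returns a summed token log-probability per example for each task), Eq. 16 and the summed objective amount to:

```python
# Minimal sketch of the joint objective (Eq. 16); names are illustrative.
import torch

def joint_loss(logp_single, logp_multi):
    """logp_single: (|T_s|,) tensor of log P(Y|S);
    logp_multi: (|T_m|,) tensor of log P(Y|D)."""
    L_s = -logp_single.mean()
    L_m = -logp_multi.mean()
    return L_s + L_m  # the summed loss optimized jointly, as stated above

# Toy usage with fake (non-positive) log-probabilities:
loss = joint_loss(-torch.rand(8), -torch.rand(4))
```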

# 4 Experiment

# 4.1 Datasets

We conduct experiments on the recently released Multi-News dataset (Fabbri et al., 2019) and the standard DUC multi-document summarization datasets (Over et al., 2007). The Multi-News dataset contains 44,972 document-summary pairs for training, 5,622 for development, and 5,622 for test. The number of source documents per summary ranges from 2 to 10. DUC-03 and DUC-04 contain 30 and 50 topics, respectively. Each topic has 10 documents paired with 4 different human-written references. CNN/DailyMail (Hermann et al., 2015; Nallapati et al., 2016) is a large-scale single-document summarization dataset containing 287,226 document-summary pairs for training, 13,368 for development, and 11,490 for test.

# 4.2 Implementation Details

We train the model on the Multi-News and CNN/DailyMail datasets. Considering that different datasets have different expression characteristics, we use a different BOS token for each dataset in the decoding phase. We take DUC-04 as the test set, and DUC-03 is used for tuning the model when evaluating on DUC-04. We set our model parameters based on preliminary experiments on the Multi-News and CNN/DailyMail development sets. We prune the vocabulary to 50k and, to handle OOVs, replace each unknown word with the source word that has the maximum weight in the copy attention. We set the dimension of word embeddings and hidden units $d_{model}$ to 512 and the feed-forward units to 1024. We use 4 heads for multi-head self-attention, masked multi-head self-attention, and multi-head cross-attention. The number of layers $L$ is set to 6. We set the dropout rate to 0.1 and use the Adam optimizer with an initial learning rate $\alpha = 0.0001$, momentum $\beta_{1} = 0.9$, $\beta_{2} = 0.999$, and weight decay $\epsilon = 10^{-5}$. The learning rate is halved if the loss on the development set increases for two consecutive epochs. We use a mini-batch size of 10. Beam search with a beam size of 5 is used for decoding.
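
The OOV post-processing described above can be sketched as follows; this is a hedged illustration in plain Python, with assumed names, of replacing each emitted unknown token with the source word that received the highest copy-attention weight at that step.

```python
# Hedged sketch of the UNK-replacement post-processing; names are illustrative.
def replace_unks(output_tokens, copy_attn, src_tokens, unk="<unk>"):
    """copy_attn[t][j]: copy-attention weight on source position j at step t."""
    fixed = []
    for t, tok in enumerate(output_tokens):
        if tok == unk:
            j = max(range(len(src_tokens)), key=lambda k: copy_attn[t][k])
            tok = src_tokens[j]  # copy the most-attended source word
        fixed.append(tok)
    return fixed
```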

# 4.3 Metrics and Baselines

We use ROUGE (Lin, 2004) to evaluate the generated summaries in our experiments. Following previous work, we report ROUGE F1<sup>1</sup> on the Multi-News and DUC-04 datasets. We compare our model with several typical baselines as well as several baselines proposed in recent years.

PGN (See et al., 2017) is an RNN-based abstractive model with an attention mechanism that allows the system to copy words from the source text via pointing. CopyTransformer (Gehrmann et al., 2018) augments the Transformer with one of the attention heads, chosen randomly, as the copy distribution. Hi-MAP (Fabbri et al., 2019) expands the pointer-generator network into a hierarchical network and integrates an MMR module to calculate sentence-level scores. These baselines are trained on the Multi-News corpus and have been compared and reported in Fabbri et al. (2019), which released the Multi-News dataset; we report their results directly from that paper. PG-MMR (Lebanoff et al., 2018) combines MMR with an abstractive model trained on the CNN/DailyMail corpus to generate summaries from multi-document inputs, requiring no multi-document summarization training corpus. SDS-to-MDS (Zhang et al., 2018) extends a neural abstractive model trained on the CNN/DailyMail dataset to the multi-document summarization task, leveraging a multi-document summarization corpus to tune the pre-trained single-document summarization model. It originally conducted experiments on the DUC datasets, and we also reproduce the method on the Multi-News dataset. Besides, we implement CopyTransformer\* to jointly learn single-document and multi-document summarization, trained on the CNN/DailyMail and Multi-News corpora. It concatenates the multiple input documents into a long flat text and treats multi-document summarization as a long single-document summarization task. The best hyperparameter configuration is chosen for each model.

<table><tr><td>Model</td><td>R-1</td><td>R-2</td><td>R-SU4</td></tr><tr><td>LexRank (Erkan and Radev, 2004)</td><td>38.27</td><td>12.70</td><td>13.20</td></tr><tr><td>TextRank (Mihalcea and Tarau, 2004)</td><td>38.44</td><td>13.10</td><td>13.50</td></tr><tr><td>MMR (Carbonell and Goldstein, 1998)</td><td>38.77</td><td>11.98</td><td>12.91</td></tr><tr><td>PGN (See et al., 2017)</td><td>41.85</td><td>12.91</td><td>16.46</td></tr><tr><td>CopyTransformer (Gehrmann et al., 2018)</td><td>43.57</td><td>14.03</td><td>17.37</td></tr><tr><td>Hi-MAP (Fabbri et al., 2019)</td><td>43.47</td><td>14.89</td><td>17.41</td></tr><tr><td>SDS-to-MDS (Zhang et al., 2018)</td><td>44.74</td><td>15.93</td><td>19.44</td></tr><tr><td>CopyTransformer*</td><td>45.03</td><td>16.35</td><td>19.59</td></tr><tr><td>Ours</td><td>46.26</td><td>17.02</td><td>20.46</td></tr></table>

Table 1: ROUGE F1 evaluation results on the Multi-News test set.

<table><tr><td>Model</td><td>R-1</td><td>R-2</td><td>R-SU4</td></tr><tr><td>LexRank (Erkan and Radev, 2004)</td><td>35.56</td><td>7.87</td><td>11.86</td></tr><tr><td>TextRank (Mihalcea and Tarau, 2004)</td><td>33.16</td><td>6.13</td><td>10.16</td></tr><tr><td>MMR (Carbonell and Goldstein, 1998)</td><td>30.14</td><td>4.55</td><td>8.16</td></tr><tr><td>PGN (See et al., 2017)</td><td>31.43</td><td>6.03</td><td>10.01</td></tr><tr><td>CopyTransformer (Gehrmann et al., 2018)</td><td>28.54</td><td>6.38</td><td>7.22</td></tr><tr><td>PG-MMR (Lebanoff et al., 2018)</td><td>36.42</td><td>9.36</td><td>13.23</td></tr><tr><td>Hi-MAP (Fabbri et al., 2019)</td><td>35.78</td><td>8.90</td><td>11.43</td></tr><tr><td>SDS-to-MDS (Zhang et al., 2018)</td><td>36.7</td><td>7.83</td><td>12.4</td></tr><tr><td>CopyTransformer*</td><td>36.48</td><td>8.22</td><td>12.29</td></tr><tr><td>Ours</td><td>37.24</td><td>8.60</td><td>12.67</td></tr></table>

Table 2: ROUGE F1 evaluation results on the DUC-04 dataset.

# 4.4 Automatic Evaluation

Following previous work, we report ROUGE-1 (unigram), ROUGE-2 (bigram), and ROUGE-SU4 (skip bigrams with a maximum distance of 4 words) scores as the metrics for automatic evaluation (Lin and Hovy, 2003). In Table 1, we report the results on Multi-News, where our proposed model outperforms the various baseline models. CopyTransformer performs much better than PGN and achieves a 1.72-point improvement on ROUGE-1 F1, which demonstrates the superiority of the Transformer architecture. The methods that leverage a single-document corpus (i.e., SDS-to-MDS, CopyTransformer\*, and ours) perform much better than those trained only on the multi-document corpus (i.e., PGN, CopyTransformer, and Hi-MAP). Our model gains an improvement of 1.52 points over SDS-to-MDS and 1.23 points over CopyTransformer\* on ROUGE-1 F1, which verifies the effectiveness of the proposed architecture for the multi-document summarization task.

In Table 2, we report the results on the DUC-04 test set. Our model achieves scores of 37.24, 8.60, and 12.67 on the three ROUGE metrics, respectively. PG-MMR and Hi-MAP obtain higher scores on ROUGE-2 or ROUGE-SU4 F1, as they employ the MMR technique to further reduce redundancy. Our proposed model achieves the best performance on ROUGE-1 F1 among all compared models, which indicates that it transfers well across datasets.

# 4.5 Human Evaluation

To further evaluate the quality of the generated summaries, we carry out a human evaluation focusing on three aspects: fluency, informativeness, and non-redundancy. The fluency indicator focuses on whether the summary is well-formed and grammatical. The informativeness indicator reflects whether the summary covers salient points from the input documents. The non-redundancy indicator measures whether the summary contains repeated information. We sample 50 instances from the Multi-News test set and employ five graduate students to rate each summary. Each human judge evaluates all outputs of the different systems for the same sample. Three human judgments are obtained for every sample, and the final scores are averaged across judges.

Results are presented in Table 3. We can see that our model performs much better than all baselines. The Spearman correlation coefficients between annotators are high, which supports the validity of the human evaluation. On fluency, our model achieves a score of 3.5, higher than the 3.42 of CopyTransformer\* and the 3.3 of SDS-to-MDS, indicating that our model reduces grammatical errors and improves the readability of the summary. On informativeness, our model is higher than CopyTransformer\* by 0.16 and SDS-to-MDS by 0.2, which indicates that our model can effectively capture the salient information. On non-redundancy, our model also outperforms all baselines, indicating that our method better avoids repeated information in the generated summary.

<table><tr><td>Model</td><td>Fluency</td><td>Informativeness</td><td>Non-redundancy</td></tr><tr><td>CopyTransformer (Gehrmann et al., 2018)</td><td>3.1</td><td>3.08</td><td>2.94</td></tr><tr><td>Hi-MAP (Fabbri et al., 2019)</td><td>2.98</td><td>2.94</td><td>3.02</td></tr><tr><td>SDS-to-MDS (Zhang et al., 2018)</td><td>3.3</td><td>3.22</td><td>3.18</td></tr><tr><td>CopyTransformer*</td><td>3.42</td><td>3.26</td><td>3.24</td></tr><tr><td>Ours</td><td>3.5</td><td>3.42</td><td>3.36</td></tr><tr><td>Spearman</td><td>0.732</td><td>0.715</td><td>0.698</td></tr></table>

Table 3: Human evaluation. The ratings are on a Likert scale of 1 (worst) to 5 (best).

# 4.6 Ablation Study

We perform an ablation study to investigate the influence of joint learning with single-document summarization and the effectiveness of the decoding controller. First, we train the model only on the Multi-News dataset, to verify the helpfulness of single-document summarization to the abstractive multi-document summarization task. Then we replace the decoding controller with a fixed weight vector $z = [1/I, \dots, 1/I]$, i.e., simply averaging the vocabulary distributions from the summary decoder, to verify the effectiveness of the decoding controller.

<table><tr><td>Model</td><td>R-1</td><td>R-2</td><td>R-SU4</td></tr><tr><td>Ours</td><td>46.26</td><td>17.02</td><td>20.46</td></tr><tr><td>w/o joint learning</td><td>44.64</td><td>16.14</td><td>19.06</td></tr><tr><td>w/o decoding controller</td><td>44.94</td><td>16.07</td><td>19.11</td></tr></table>

Table 4: Results of the ablation study on the Multi-News test set.

Table 4 presents the results. We find that the ROUGE-1 F1 score drops by 1.62 and the ROUGE-2 F1 score by 0.88 when training the model only on the Multi-News dataset, indicating that joint learning with single-document summarization is beneficial to multi-document summarization. The ROUGE-1 F1 score drops by 1.32 and the ROUGE-2 F1 score by 0.95 after the decoding controller is removed, which shows that the decoding controller can effectively aggregate the outputs of the summary decoder for multiple input documents.

# 4.7 Discussion

Performance on Single-Document Summarization In Table 5, we report the results on the CNN/DailyMail test set. CopyTransformer\* outperforms CopyTransformer by 0.71 points on ROUGE-1 F1, which indicates that joint learning can also improve performance on single-document summarization. Compared with CopyTransformer\*, our method gains an improvement of 0.31 points on ROUGE-1 F1, which indicates that our method can make better use of the multi-document corpus to improve single-document summarization.

<table><tr><td>Model</td><td>R-1</td><td>R-2</td><td>R-L</td></tr><tr><td>Lead-3</td><td>40.34</td><td>17.70</td><td>36.57</td></tr><tr><td>PGN (See et al., 2017)</td><td>39.53</td><td>17.28</td><td>36.38</td></tr><tr><td>CopyTransformer</td><td>40.68</td><td>18.26</td><td>37.38</td></tr><tr><td>CopyTransformer*</td><td>41.39</td><td>18.58</td><td>38.03</td></tr><tr><td>Ours</td><td>41.7</td><td>18.86</td><td>38.36</td></tr></table>

Table 5: ROUGE F1 evaluation results on the CNN/DailyMail test set.

Performance against the Number of Input Documents The number of input documents may affect summarization performance, so we further test our model and the strong baseline CopyTransformer\* with respect to the number of input documents on the Multi-News test set, where it ranges from 2 to 7. In Figure 3, we can see that the performance of both models drops as the number of input documents increases. The performance curve of our model always lies above that of CopyTransformer\*, and our model achieves better results than CopyTransformer\* especially when more documents are given.

Figure 3: ROUGE-2 F1 scores of the CopyTransformer\* baseline and our model for different numbers of input documents on the Multi-News test set.

# 5 Conclusion and Future Work

In this paper, we propose a joint learning approach to improve neural abstractive multi-document summarization by using a single-document summarization dataset. Specifically, we use a shared document encoder and summary decoder to process each document in the document set, and apply a decoding controller to aggregate all output probabilities from the summary decoder for multi-document summarization. The shared encoder and decoder are jointly trained on the single-document summarization dataset. Experimental results show that our approach substantially outperforms several strong multi-document summarization baselines and achieves state-of-the-art or very competitive performance on the Multi-News and DUC-04 datasets.

In the future, we will incorporate BERT or other pre-trained language models into our model to further improve performance.

# Acknowledgments

This work was supported by National Natural Science Foundation of China (61772036), Beijing Academy of Artificial Intelligence (BAAI) and Key Laboratory of Science, Technology and Standard in Press Industry (Key Laboratory of Intelligent Press Media Technology). We appreciate the anonymous reviewers for their helpful comments. Xiaojun Wan is the corresponding author.

# References

Lei Jimmy Ba, Jamie Ryan Kiros, and Geoffrey E. Hinton. 2016. Layer normalization. CoRR, abs/1607.06450.

Jaime G. Carbonell and Jade Goldstein. 1998. The use of MMR, diversity-based reranking for reordering documents and producing summaries. In SIGIR '98: Proceedings of the 21st Annual International ACM SIGIR Conference on Research and Development in Information Retrieval, August 24-28 1998, Melbourne, Australia, pages 335-336. ACM.

Asli Celikyilmaz, Antoine Bosselut, Xiaodong He, and Yejin Choi. 2018. Deep communicating agents for abstractive summarization. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2018, New Orleans, Louisiana, USA, June 1-6, 2018, Volume 1 (Long Papers), pages 1662-1675. Association for Computational Linguistics.

Janara Christensen, Mausam, Stephen Soderland, and Oren Etzioni. 2013. Towards coherent multi-document summarization. In Human Language Technologies: Conference of the North American Chapter of the Association of Computational Linguistics, Proceedings, June 9-14, 2013, Westin Peachtree Plaza Hotel, Atlanta, Georgia, USA, pages 1163-1173. The Association for Computational Linguistics.

Günes Erkan and Dragomir R. Radev. 2004. LexRank: Graph-based lexical centrality as salience in text summarization. J. Artif. Intell. Res., 22:457-479.

Alexander Richard Fabbri, Irene Li, Tianwei She, Suyi Li, and Dragomir R. Radev. 2019. Multi-News: A large-scale multi-document summarization dataset and abstractive hierarchical model. In Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL 2019, Florence, Italy, July 28-August 2, 2019, Volume 1: Long Papers, pages 1074-1084. Association for Computational Linguistics.

Sebastian Gehrmann, Yuntian Deng, and Alexander M. Rush. 2018. Bottom-up abstractive summarization. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, Brussels, Belgium, October 31 - November 4, 2018, pages 4098-4109. Association for Computational Linguistics.

Jiatao Gu, Zhengdong Lu, Hang Li, and Victor O. K. Li. 2016. Incorporating copying mechanism in sequence-to-sequence learning. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics, ACL 2016, August 7-12, 2016, Berlin, Germany, Volume 1: Long Papers. The Association for Computer Linguistics.

Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2016. Deep residual learning for image recognition. In CVPR.

Karl Moritz Hermann, Tomás Kocisky, Edward Grefenstette, Lasse Espeholt, Will Kay, Mustafa Suleyman, and Phil Blunsom. 2015. Teaching machines to read and comprehend. In Advances in Neural Information Processing Systems 28: Annual Conference on Neural Information Processing Systems 2015, December 7-12, 2015, Montreal, Quebec, Canada, pages 1693-1701.

Sepp Hochreiter and Jürgen Schmidhuber. 1997. Long short-term memory. Neural Computation, 9(8):1735-1780.

Logan Lebanoff, Kaiqiang Song, and Fei Liu. 2018. Adapting the neural encoder-decoder framework from single to multi-document summarization. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, Brussels, Belgium, October 31 - November 4, 2018, pages 4131-4141. Association for Computational Linguistics.

Chin-Yew Lin. 2004. ROUGE: A package for automatic evaluation of summaries. In Text Summarization Branches Out, pages 74-81.

Chin-Yew Lin and Eduard H. Hovy. 2003. Automatic evaluation of summaries using n-gram co-occurrence statistics. In Human Language Technology Conference of the North American Chapter of the Association for Computational Linguistics, HLT-NAACL 2003, Edmonton, Canada, May 27 - June 1, 2003. The Association for Computational Linguistics.

Peter J. Liu, Mohammad Saleh, Etienne Pot, Ben Goodrich, Ryan Sepassi, Lukasz Kaiser, and Noam Shazeer. 2018. Generating Wikipedia by summarizing long sequences. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings. OpenReview.net.

Yang Liu and Mirella Lapata. 2019. Hierarchical transformers for multi-document summarization. In Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL 2019, Florence, Italy, July 28 - August 2, 2019, Volume 1: Long Papers, pages 5070-5081. Association for Computational Linguistics.

Rada Mihalcea and Paul Tarau. 2004. TextRank: Bringing order into text. In Proceedings of the 2004 Conference on Empirical Methods in Natural Language Processing, EMNLP 2004, A meeting of SIGDAT, a Special Interest Group of the ACL, held in conjunction with ACL 2004, 25-26 July 2004, Barcelona, Spain, pages 404-411. ACL.

Ramesh Nallapati, Bowen Zhou, Cícero Nogueira dos Santos, Caglar Gülçehre, and Bing Xiang. 2016. Abstractive text summarization using sequence-to-sequence RNNs and beyond. In Proceedings of the 20th SIGNLL Conference on Computational Natural Language Learning, CoNLL 2016, Berlin, Germany, August 11-12, 2016, pages 280-290. ACL.

Paul Over, Hoa Dang, and Donna Harman. 2007. DUC in context. Inf. Process. Manage., 43(6).

Romain Paulus, Caiming Xiong, and Richard Socher. 2018. A deep reinforced model for abstractive summarization. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings. OpenReview.net.

Mike Schuster and Kuldip K. Paliwal. 1997. Bidirectional recurrent neural networks. IEEE Trans. Signal Processing, 45(11):2673-2681.

Abigail See, Peter J. Liu, and Christopher D. Manning. 2017. Get to the point: Summarization with pointer-generator networks. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics, ACL 2017, Vancouver, Canada, July 30 - August 4, Volume 1: Long Papers, pages 1073-1083. Association for Computational Linguistics.

Jiwei Tan, Xiaojun Wan, and Jianguo Xiao. 2017. Abstractive document summarization with a graph-based attentional neural model. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics, ACL 2017, Vancouver, Canada, July 30 - August 4, Volume 1: Long Papers, pages 1171-1181. Association for Computational Linguistics.

Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, 4-9 December 2017, Long Beach, CA, USA, pages 5998-6008.

Xiaojun Wan. 2010. Towards a unified approach to simultaneous single-document and multi-document summarizations. In COLING 2010, 23rd International Conference on Computational Linguistics, Proceedings of the Conference, 23-27 August 2010, Beijing, China, pages 1137-1145. Tsinghua University Press.

Michihiro Yasunaga, Rui Zhang, Kshitijh Meelu, Ayush Parek, Krishnan Srinivasan, and Dragomir R. Radev. 2017. Graph-based neural multi-document summarization. In Proceedings of the 21st Conference on Computational Natural Language Learning (CoNLL 2017), Vancouver, Canada, August 3-4, 2017, pages 452-462. Association for Computational Linguistics.

Jianmin Zhang, Jiwei Tan, and Xiaojun Wan. 2018. Towards a neural network approach to abstractive multi-document summarization. CoRR, abs/1804.09010.
abstractivemultidocumentsummarizationviajointlearningwithsingledocumentsummarization/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:aae735894f118572a685a956364666a597223dc393efd183c7350dfd5d204d53
size 356601

abstractivemultidocumentsummarizationviajointlearningwithsingledocumentsummarization/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fe06435ea65478ae21f6e66ff9c61b37f312fa11e06a51fa3127fb486f882370
size 340289

accuratepolyglotsemanticparsingwithdaggrammars/a362056e-eba4-4767-a0a5-8a53a63a1b9c_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:49a5b76258b57d3f5214f87496cf47de8b853688f8fca7c54ae9c72c34b183ec
size 90429

accuratepolyglotsemanticparsingwithdaggrammars/a362056e-eba4-4767-a0a5-8a53a63a1b9c_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2be79ebb86207ad6da7438482c661e1a0f739c3256808bc12d9d125f301a308f
size 105215

accuratepolyglotsemanticparsingwithdaggrammars/a362056e-eba4-4767-a0a5-8a53a63a1b9c_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5a9dc98896fcb02058ac0d8dc28d3afe6561121eacc519b62537780d426c94d3
size 701598

accuratepolyglotsemanticparsingwithdaggrammars/full.md
ADDED
@@ -0,0 +1,344 @@
# Accurate polyglot semantic parsing with DAG grammars

Federico Fancellu, Akos Kádár, Ran Zhang, Afsaneh Fazly

Samsung AI Centre Toronto (SAIC Toronto)

{federico.f, ran.zhang, a.fazly}@samsung.com

# Abstract
Semantic parses are directed acyclic graphs (DAGs), but in practice most parsers treat them as strings or trees, mainly because models that predict graphs are far less understood. This simplification, however, comes at a cost: there is no guarantee that the output is a well-formed graph. Recent work by Fancellu et al. (2019) addressed this problem by proposing a graph-aware sequence model that utilizes a DAG grammar to guide graph generation. We significantly improve upon this work by proposing a simpler architecture as well as more efficient training and inference algorithms that can always guarantee the well-formedness of the generated graphs. Importantly, unlike Fancellu et al., our model does not require language-specific features, and hence can harness the inherent ability of DAG-grammar parsing in multilingual settings. We perform monolingual as well as multilingual experiments on the Parallel Meaning Bank (Abzianidze et al., 2017). Our parser outperforms previous graph-aware models by a large margin, and closes the performance gap between string-based and DAG-grammar parsing.

# 1 Introduction
Semantic parsers map a natural language utterance into a machine-readable meaning representation, thus helping machines understand and perform inference and reasoning over natural language data. Various semantic formalisms have been explored as the target meaning representation for semantic parsing, including dependency-based compositional semantics (Liang et al., 2013), abstract meaning representation (AMR, Banarescu et al., 2013), minimum recursion semantics (MRS, Copestake et al., 2005), and discourse representation theory (DRT, Kamp, 1981). Despite meaningful differences across formalisms or parsing models, a representation in any of these formalisms can be expressed as a directed acyclic graph (DAG).

Figure 1: The discourse representation structure for 'We barred the door and locked it'. For ease of reference in later figures, each box includes a variable corresponding to the box itself, at top right in gray.
Consider for instance the sentence 'We barred the door and locked it', whose meaning representation as a Discourse Representation Structure (DRS) is shown in Figure 1. A DRS is usually represented as a set of nested boxes (e.g. $b_{1}$), containing variable-bound discourse referents (e.g. 'lock(e2)'), semantic constants (e.g. 'speaker'), predicates (e.g. AGENT) expressing relations between variables and constants, and discourse relations between the boxes (e.g. CONTINUATION). This representation can be expressed as a DAG by turning referents and constants into vertices, and predicates and discourse relations into connecting edges, as shown in Figure 2.
How can we parse a sentence into a DAG? Commonly-adopted approaches view graphs as strings (e.g. van Noord and Bos, 2017; van Noord et al., 2018), or trees (e.g. Zhang et al., 2019a; Liu et al., 2018), taking advantage of the linearized graph representations provided in annotated data (e.g. Figure 3, where the graph in Figure 2 is represented in PENMAN notation (Goodman, 2020)).

Figure 2: The DRS of Figure 1 expressed as a DAG.

Figure 3: The DAG of Figure 2 expressed as a string.
```lisp
(b1 / □ :CONTINUATION (b2 / □ :DRS (e1 / bar :AGENT (c1 / speaker)
                                              :THEME (x1 / door^p)))
       :CONTINUATION (b3 / □ :DRS (e2 / lock :AGENT c1 :PATIENT x1)))
```
An advantage of these linearized representations is that they allow for the use of well-understood sequential decoders and provide a general framework to parse into any arbitrary formalism. However, these representations are unaware of the overall graph structure they build, as well as of reentrant semantic relations, such as coordination, coreference, and control, that are widespread in language. Parsers such as that of Zhang et al. (2019b), although able to generate reentrancies in their output, do so by simply predicting pointers back to already generated nodes.
Parsing directly into DAGs, although desirable, is less straightforward than string-based parsing. Whereas probabilistic models of strings and trees are ubiquitous in NLP, it is at present an active problem in modern formal language theory to develop formalisms that allow one to define probability distributions over DAGs of practical interest. A successful line of work derives semantic graphs using graph grammars, which generate a graph by rewriting non-terminal symbols with graph fragments. Among these, hyperedge replacement grammar (HRG) has been explored for parsing into semantic graphs (Habel, 1992; Chiang et al., 2013). However, parsing with HRGs is not practical due to its complexity and the large number of possible derivations per graph (Groschwitz et al., 2015). Thus, work has looked at ways of constraining the space of possible derivations, usually in the form of alignment or syntax (Peng et al., 2015). For example, Groschwitz et al. (2018) and Donatelli et al. (2019) extracted fine-grained typed grammars whose productions are aligned to the input sentence and combined over a dependency-like structure. Similarly, Chen et al. (2018) draw on constituent parses to combine HRG fragments.
Björklund et al. (2016) show that there exists a restricted subset of HRGs, Restricted DAG grammar (RDG), that provides a unique derivation per graph. A unique derivation means that a graph is generated by a unique sequence of productions, which can then be predicted using sequential decoders, without the need of an explicit alignment model or an underlying syntactic structure. Furthermore, the grammar places hard constraints on the rewriting process, which can be used to guarantee the well-formedness of output graphs during decoding. Drawing on this result, recent work by Fancellu et al. (2019, henceforth FA19) introduces recurrent neural network RDGs, a sequential decoder that models graph generation as a rewriting process with an underlying RDG. However, despite the promising framework, the approach of FA19 falls short in several respects.
In this paper, we address these shortcomings, and propose an accurate, efficient, polyglot model for Neural RDG parsing. Specifically, our contributions are as follows:
Grammar: In practice, RDGs extracted from training graphs can be large and sparse. We show a novel factorization of the RDG production rules that reduces the sparsity of the extracted grammars. Furthermore, we make use of RDGs extracted from fully human-annotated training data to filter out samples from a larger noisy machine-generated dataset that cannot be derived using such grammars. We find that this strategy not only drastically reduces the size of the grammar, but also improves the final performance.
Model: FA19 use an architecture inspired by syntactic parsing, a stackLSTM, trained on a gamut of syntactic and semantic features. We replace this with a novel architecture that allows for batched input, while adding a multilingual transformer encoder that relies on word-embedding features only.

Constrained Decoding: We identify a limitation in the decoding algorithm presented by FA19, in that it only partially makes use of the well-formedness constraints of an RDG. We describe the source of this error, implement a correction, and show that we can guarantee well-formed DAGs.
Multilinguality: Training data in languages other than English is often small and noisy. FA19 addressed this issue with cross-lingual models using features available only for a small number of languages, but did not observe improvements over monolingual baselines in languages other than English. We instead demonstrate the flexibility of RDGs by extracting a joint grammar from graph annotations in different languages. At the same time, we make full use of our multilingual encoder to build a polyglot model that can accept training data in any language, allowing us to experiment with different combinations of data. Our results tell a different story: models that use combined training data from multiple languages always substantially outperform monolingual baselines.
We test our approach on the Parallel Meaning Bank (PMB, Abzianidze et al., 2017), a multilingual graphbank. Our experimental results demonstrate that our new model outperforms that of FA19 by a large margin on English while fully exploiting the power of RDGs to always guarantee a well-formed graph. We also show that the ability of simultaneously training on multiple languages substantially improves performance for each individual language. Importantly, we close the performance gap between graph-aware parsing and state-of-the-art string-based models.
# 2 Restricted DAG Grammar
We model graph generation as a process of graph rewriting with an underlying grammar. Our grammar is a restricted DAG grammar (RDG, Björklund et al., 2016), a type of context-free grammar designed to model linearized DAGs. For ease of understanding, we represent fragments in grammar productions as strings. This is shown in Figure 4, where the right-hand-side (RHS) fragment can be represented as its left-to-right linearization, with reentrant nodes flagged by a dedicated $ symbol.
An RDG is a tuple $\langle P, N, \Sigma, S, V \rangle$ where $P$ is a set of productions of the form $\alpha \rightarrow \beta$; $N$ is the set of non-terminal symbols $\{L, T_0, \dots, T_n\}$ up to a maximum number of $n$; $\Sigma$ is the set of terminal symbols; $S$ is the start symbol; $V$ is an unbounded set of variable references $\{\$1, \$2, \dots\}$, whose role is described below.
Figure 4: An example production for a grammar. The graph fragment on the right-hand side can be replaced with a string representing its depth-first traversal.
The left-hand-side (LHS) $\alpha$ of a production $p\in P$ is a function $T_{i}\in N$ (where $i$ is the rank) that takes $i$ variable references as arguments. Variable references are what ensure the well-formedness of a generated graph in an RDG, by keeping track of how many reentrancies are expected in a derivation as well as how they are connected to their neighbouring nodes. Rank, in turn, is an indication of how many reentrancies are present in a graph derivation. For instance, in the graph fragment in Figure 4, given that there are two variable references and a non-terminal of rank 2, we are expecting two reentrant nodes at some point in the derivation. The RHS $\beta$ is a typed fragment made up of three parts: a variable $v$ describing the semantic type, a label non-terminal $L$, and a list of tuples $\langle e,s\rangle$ where $e$ is an edge label from a set of labels $E$ and $s$ is either a non-terminal function $T$ or a variable reference. The non-terminal $L$ can only be rewritten as a terminal symbol $l\in \Sigma$. If a node is reentrant, we mark it with a superscript $*$ over $v$. Variable references are percolated down the derivation and are replaced once a reentrant variable $v^{*}$ is found on the RHS.
Following FA19, we show a complete derivation in Figure 5 that reconstructs the graph in Figure 2. Our grammar derives strings by first rewriting the start symbol $S$, a non-terminal function $T_{0}$. At each subsequent step, the leftmost non-terminal function in the partially derived string is rewritten, with special handling for variable references described below. A derivation is complete when no non-terminals remain.
<table><tr><td>Step</td><td>Production</td><td>Result</td></tr><tr><td>1</td><td>r1</td><td>(b1/L:CONT T2($1,$2):CONT T2($1,$2))</td></tr><tr><td>2</td><td>r2</td><td>(b1/□:CONT (b2/L:DRS T2($1,$2)):CONT T2($1,$2))</td></tr><tr><td>3</td><td>r3</td><td>(b1/□:CONT (b2/□:DRS (e1/L:AGENT T1($1):THEME T1($2)))) :CONT T2($1,$2))</td></tr><tr><td>4</td><td>r4</td><td>(b1/□:CONT (b2/□:DRS (e1/bar:AGENT (c*/L):THEME T1($2)))) :CONT T2(c,$2))</td></tr><tr><td>5</td><td>r5</td><td>(b1/□:CONT (b2/□:DRS (e1/bar:AGENT (c*/speaker):THEME (x*/L)))) :CONT T2(c,x))</td></tr><tr><td>6</td><td>r2</td><td>(b1/□:CONT (b2/□:DRS (e1/bar:AGENT (c*/speaker):THEME (x*/doorp)))) :CONT (b3/□:DRS T2(c,x)))</td></tr><tr><td>7</td><td>r6</td><td>(b1/□:CONT (b2/□:DRS (e1/bar:AGENT (c*/speaker):THEME (x*/doorp)))) :CONT (b3/□:DRS (e2/lock:AGENT c:PATIENT x)</td></tr></table>

Figure 5: A full RDG derivation for the graph in Figure 2. At each step $t$ the leftmost non-terminal $T_{n}$ (in blue) is rewritten into a fragment (underlined) and its label non-terminal $L$ (in red) replaced with a terminal. Variable references are percolated down the derivation unless a reentrant variable $v^{*}$ is found (steps 4 and 5).

Variable references are resolved when applying a production that maps a reentrant variable name to a reference, as shown for production $r_4$, where the variable $c$ is mapped to \$1. Once this mapping is performed, all instances of \$1 in the RHS are replaced by the corresponding variable name. In this way, the reference to $c$ is kept track of throughout the derivation, becoming the target of AGENT in $r_6$. The same applies in $r_5$, where $x$ is mapped to \$2.
All our fragments are delexicalized. This is achieved by the separate non-terminal $L$, which at every step is rewritten as the corresponding terminal label (e.g. bar). Delexicalization allows us to reduce the size of the grammar and to factorize the prediction of fragments and labels separately.
However, DAG grammars can still be large due to the many combinations of how edge labels and their corresponding non-terminals can appear in a fragment. For this reason, we propose a further simplification where edge labels are replaced with placeholders $\hat{e}_1\ldots \hat{e}_{|e|}$, which we exemplify using the production in Figure 4 as follows:
$$
S \rightarrow (b_1 / L\ \hat{e}_1\, T_2(\$1, \$2)\ \hat{e}_2\, T_2(\$1, \$2))
$$
After a fragment is predicted, placeholders are then replaced with actual edge labels by a dedicated module (see § 3.2 for more details).
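To make the rewriting process above concrete, the following minimal Python sketch (ours, not the authors' implementation) rewrites the leftmost non-terminal function of a partial derivation and percolates its variable-reference arguments into the chosen fragment; reentrant-variable resolution (steps 4 and 5 of Figure 5) is omitted, and the rules are simplified versions of $r_1$ and $r_2$.

```python
import re

# Productions stored as delexicalized fragment strings, in the spirit of § 2:
# "T2(...)" is a non-terminal function of rank 2 and "L" a label slot.
RULES = {
    "r1": "(b1 / L :CONT T2($1,$2) :CONT T2($1,$2))",
    "r2": "(b / L :DRS T2($1,$2))",
}

NONTERM = re.compile(r"T\d\([^()]*\)")  # leftmost non-terminal function

def rewrite(partial: str, fragment: str) -> str:
    """Substitute the fragment for the leftmost non-terminal, percolating
    the non-terminal's variable references ($1, $2, ...) into it."""
    m = NONTERM.search(partial)
    if m is None:
        raise ValueError("derivation already complete")
    args = m.group(0)[m.group(0).index("(") + 1:-1].split(",")
    body = fragment
    for i, arg in enumerate(args, start=1):
        body = body.replace(f"${i}", arg.strip())
    return partial[:m.start()] + body + partial[m.end():]

g = RULES["r1"]              # start symbol rewritten by r1
g = rewrite(g, RULES["r2"])
print(g)  # (b1 / L :CONT (b / L :DRS T2($1,$2)) :CONT T2($1,$2))
```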
Comparison with Groschwitz et al. (2018)'s AM algebra. RDG is very similar to other graph grammars proposed for semantic parsing, in particular to Groschwitz et al. (2018)'s AM algebra used for AMR parsing. Groschwitz et al. (2018)'s framework relies on a fragment extraction process similar to ours, where each node in a graph along with its outgoing edges makes up a fragment. However, the two grammars differ mainly in how typing, and as a consequence composition, is thought of: whereas in the AM algebra both the fragments themselves and the non-terminal edges are assigned thematic types (e.g. S[ubject], O[bject], MOD[ifier]), we only place rank information on the non-terminals and assign a more generic semantic type to the fragment.
The fine-grained thematic types in the AM algebra add a level of linguistic sophistication that RDG lacks, in that fragments fully specify the roles a word is expected to fill. This ensures that the output graphs are always semantically well-formed; in this respect, the AM algebra behaves very similarly to CCG. However, this sophistication not only requires ad-hoc heuristics that are tailored to a specific formalism (AMR in this case) but also relies on alignment information with the source words.
On the other hand, our grammar is designed to predict a graph structure in sequential models. Composition is constrained by the rank of a non-terminal, so as to ensure that at each decoding step the model is always aware of the placement of reentrant nodes. However, we do not ensure semantic well-formedness, in that words are predicted separately from their fragments and we do not rely on alignment information. As a result, our grammar extraction algorithm does not rely on any heuristics and can easily be applied to any semantic formalism.
# 3 Architecture
Our model is an encoder-decoder architecture that takes as input a sentence and generates a DAG $G$ as a sequence of fragments with their corresponding labels, using the rewriting system in § 2. In what follows we describe how we obtain the logits for each target prediction, all of which are normalized with the softmax function to yield probability distributions. A detailed diagram of our architecture is shown in Figure 7 in Appendix A.
# 3.1 Encoder
We encode the input sentence $w_{1}, \ldots, w_{n}$ using a pre-trained multilingual BERT (mBERT) model (Devlin et al., 2018). The final word-level representations are obtained through mean-pooling the sub-word representations of mBERT computed using the Wordpiece algorithm (Schuster and Nakajima, 2012). We do not rely on any additional (language-specific) features, hence making the encoder polyglot. The word vectors are then fed to a two-layer BiLSTM encoder, whose forward and backward states are concatenated to produce the final token encodings $\mathbf{s}_1^{enc}, \ldots, \mathbf{s}_n^{enc}$.
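As an illustrative sketch of this encoder (assuming PyTorch and the HuggingFace transformers library; hidden sizes are placeholders rather than the settings of Appendix D):

```python
import torch
from torch import nn
from transformers import AutoTokenizer, AutoModel

# mBERT sub-word vectors are mean-pooled into word vectors and fed to a
# two-layer BiLSTM, as described in § 3.1.
tokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-uncased")
mbert = AutoModel.from_pretrained("bert-base-multilingual-uncased")
bilstm = nn.LSTM(768, 512, num_layers=2, bidirectional=True, batch_first=True)

words = ["We", "barred", "the", "door", "and", "locked", "it"]
batch = tokenizer(words, is_split_into_words=True, return_tensors="pt")
with torch.no_grad():
    subword = mbert(**batch).last_hidden_state[0]   # (n_subwords, 768)

# Mean-pool the sub-word pieces of each word (special tokens map to None).
word_ids = batch.word_ids()
pooled = [
    subword[[i for i, w in enumerate(word_ids) if w == k]].mean(dim=0)
    for k in range(len(words))
]
word_vecs = torch.stack(pooled).unsqueeze(0)        # (1, n_words, 768)
s_enc, _ = bilstm(word_vecs)                        # (1, n_words, 1024)
```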
# 3.2 Decoder
The backbone of the decoder is a two-layer LSTM, with a separate attention mechanism for each layer. Our decoding strategy follows steps similar to those in Figure 5. At each step we first predict a delexicalized fragment $f_{t}$, and substitute a terminal label $l_{t}$ in place of $L$. We initialize the decoder LSTM with the encoder's final state $\mathbf{s}_{\mathbf{n}}^{\mathrm{enc}}$. At each step $t$, the network takes as input $[\mathbf{f}_{t-1}; \mathbf{l}_{t-1}]$, the concatenation of the embeddings of the fragment and its label output at the previous time step. At $t = 0$, we initialize both fragment and label encodings with a $\langle \text{START} \rangle$ token. The first layer in the decoder is responsible for predicting fragments. The second layer takes as input the output representations of the first layer, and predicts terminal labels. The following paragraphs provide details on the fragment and label predictions.
Fragment prediction. We make the prediction of a fragment dependent on the embedding of the parent fragment and the decoder history. We define as parent fragment the fragment containing the non-terminal the current fragment is rewriting; for instance, in Figure 5, the fragment in step 1 is the parent of the fragment underlined in step 2. Following this intuition, at time $t$, we concatenate the hidden state of the first layer $\mathbf{h}_t^1$ with a context vector $\mathbf{c}_t^1$ and the embedding of its parent fragment $\mathbf{u}_t$. The logits for fragment $f_t$ are predicted with a single linear layer $\mathbf{W}^{f}[\mathbf{c}_{t}^{1};\mathbf{u}_{t};\mathbf{h}_{t}^{1}] + \mathbf{b}$. We compute $\mathbf{c}_{t}^{1}$ using a standard soft attention mechanism (Xu et al., 2015) as follows, where $\mathbf{s}_{1:N}^{\mathrm{enc}}$ represents the concatenation of all encoder hidden states:

$$
\mathbf{c}_t^1 = \sum_{i}^{N} \alpha_i \mathbf{s}_i^{\mathrm{enc}} \tag{1}
$$

$$
\mathbf{a} = \mathrm{MLP}^1[\mathbf{h}_t^1; \mathbf{s}_{1:N}^{\mathrm{enc}}] \tag{2}
$$

$$
\alpha_i = \frac{e^{\mathbf{a}_i}}{\sum_j e^{\mathbf{a}_j}} \tag{3}
$$

$$
\mathrm{MLP}^1(\mathbf{x}) = \mathrm{ReLU}(\mathbf{W}\mathbf{x} + \mathbf{b}) \tag{4}
$$
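The computation in Eqs. (1)-(4) can be transcribed as follows; this is a sketch in which the final scalar projection after the ReLU layer is our assumption (Eq. (2) must yield one score per input position), and all dimensions are illustrative:

```python
import torch
from torch import nn

class SoftAttention(nn.Module):
    """Eqs. (1)-(4): additive attention over the encoder states."""
    def __init__(self, d_h: int, d_enc: int, d_hid: int = 256):
        super().__init__()
        # MLP^1(x) = ReLU(Wx + b), followed by a scalar projection (assumed)
        self.mlp = nn.Sequential(
            nn.Linear(d_h + d_enc, d_hid), nn.ReLU(), nn.Linear(d_hid, 1))

    def forward(self, h_t, s_enc):          # h_t: (d_h,), s_enc: (N, d_enc)
        h_rep = h_t.unsqueeze(0).expand(s_enc.size(0), -1)
        a = self.mlp(torch.cat([h_rep, s_enc], dim=-1)).squeeze(-1)  # Eq. (2)
        alpha = torch.softmax(a, dim=0)                              # Eq. (3)
        return (alpha.unsqueeze(-1) * s_enc).sum(dim=0)              # Eq. (1)

d_h, d_enc, d_frag, n_frags = 1024, 1024, 100, 232
attn = SoftAttention(d_h, d_enc)
W_f = nn.Linear(d_enc + d_frag + d_h, n_frags)      # W^f [c; u; h] + b

h_t, u_t = torch.randn(d_h), torch.randn(d_frag)    # decoder state, parent
s_enc = torch.randn(7, d_enc)                       # encoder states
c_t = attn(h_t, s_enc)
fragment_logits = W_f(torch.cat([c_t, u_t, h_t]))
```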
Label prediction. Terminal labels in the output graph can either correspond to a lemma in the input sentence (e.g. 'bar', 'lock'), or to a semantic constant (e.g. 'speaker'). We make use of this distinction by incorporating a selection mechanism that learns to choose between predicting a lemma from the input and a token from a vocabulary of labels $L$. We concatenate the hidden state of the second layer $\mathbf{h}_t^2$ with the embedding of the fragment predicted at the current time-step $\mathbf{f}_t$ and the second layer context vector $\mathbf{c}_t^2$. Let us refer to this representation as $\mathbf{z}_t = [\mathbf{f}_t; \mathbf{h}_t^2; \mathbf{c}_t^2]$. The context vector for the second layer is computed in the same way as $\mathbf{c}_t^1$, but using $\mathbf{h}_t^2$ in place of $\mathbf{h}_t^1$ and separate attention MLP parameters. To compute the logits for label prediction we apply a linear transformation to the encoder representations, $\mathbf{e} = \mathbf{W}^s \mathbf{s}_{\mathbf{1:N}}^{\mathrm{enc}}$. We concatenate the resulting vectors with the label embedding matrix $\mathbf{L}$ and compute the dot product $\mathbf{z}_t^T[\mathbf{e}; \mathbf{L}]$ to obtain the final unnormalized scores jointly for all tokens in the input and $L$.
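A sketch of this joint scoring over copyable input tokens and vocabulary labels, with illustrative dimensions and vocabulary size:

```python
import torch
from torch import nn

# z_t = [f_t; h_t^2; c_t^2] scores input tokens (copy) and vocabulary
# labels jointly through a single dot product, as described above.
d_z, d_enc, n_labels = 1224, 1024, 3000
W_s = nn.Linear(d_enc, d_z, bias=False)       # e = W^s s_enc
L = nn.Parameter(torch.randn(n_labels, d_z))  # label embedding matrix L

z_t = torch.randn(d_z)
s_enc = torch.randn(7, d_enc)                 # one state per input token
e = W_s(s_enc)                                # (7, d_z)
scores = torch.cat([e, L]) @ z_t              # (7 + n_labels,) joint logits
probs = torch.softmax(scores, dim=0)          # copy a lemma or emit a label
```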
In the PMB, each label is also annotated with its sense tag and with information about whether it is presupposed in the context or not. We predict the former, $s_t$, from a class of sense tags $S$ extracted from the training data, and the latter, $p_t$, a binary variable, by passing $\mathbf{z}_t$ through two distinct linear layers to obtain the logits for each.
Edge factorization. In § 2, we discussed how we made grammars even less sparse by replacing the edge labels in a production fragment with placeholders. From a modelling perspective, this allows us to factorize edge label prediction, where the decoder first predicts all the fragments in the graph and then predicts the edge labels $e_1 \dots e_{|e|}$ that substitute in place of the placeholders.

To do so, we cache the intermediate representations $\mathbf{z}_t$ over time. We use these as features to replace the edge placeholders $\hat{e}_i$ with the corresponding true edge labels $e_i$. To obtain the edge-label logits we pass the second-layer representation for the child fragment $\mathbf{z}^c$ and parent fragment $\mathbf{z}^p$ to a pairwise linear layer: $\mathbf{W}^e[\mathbf{W}^c\mathbf{z}^c \odot \mathbf{W}^p\mathbf{z}^p]$.
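A sketch of the pairwise scorer, with illustrative dimensions:

```python
import torch
from torch import nn

# Cached parent/child decoder representations are projected, multiplied
# elementwise, and mapped to edge-label logits: W^e[W^c z^c * W^p z^p].
d_z, d_e, n_edge_labels = 1224, 100, 60
W_c = nn.Linear(d_z, d_e, bias=False)   # child projection W^c
W_p = nn.Linear(d_z, d_e, bias=False)   # parent projection W^p
W_e = nn.Linear(d_e, n_edge_labels)     # output layer W^e

z_child, z_parent = torch.randn(d_z), torch.randn(d_z)
edge_logits = W_e(W_c(z_child) * W_p(z_parent))  # one logit per edge label
```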
# 3.3 Graph-aware decoding
At inference time, our graph decoder rewrites non-terminals left-to-right by choosing the fragment with the highest probability, and then predicts terminal and/or edge labels. The rank of a non-terminal and the variable references it takes as arguments place a hard constraint on the fragment that rewrites in its place (as shown in § 2). Only by satisfying these constraints can the model ensure the well-formedness of generated graphs.
By default, our decoder does not explicitly follow these constraints and can substitute a non-terminal with any fragment in the grammar. This is to assess whether a vanilla decoder can learn to substitute in a fragment that correctly matches a non-terminal. On top of the vanilla decoder, we then exploit these hard constraints in two different ways, as follows:
Rank prediction. We incorporate information about rank as a soft constraint during learning by having the model predict it at each time step. This means that the model can still predict a fragment whose rank and variable references do not match those of a non-terminal, but it is guided not to do so. We treat rank prediction as a classification task where we use the same features as fragment prediction, which we then pass to a linear layer: $\mathbf{r}_t = \mathbf{W}^r [\mathbf{c}_t^1;\mathbf{u}_t;\mathbf{h}_t^1] + \mathbf{b}^r$. Note that the range of predicted ranks is determined by the training grammar, so it is not possible to generate a rank that has not been observed and does not have associated rules.
Constrained decoding. We explicitly ask the model to choose only amongst those fragments that can match the rank and variable references of a non-terminal. This may override model predictions but always ensures that a graph is well-formed. To ensure well-formedness, FA19 only check for rank. This can lead to infelicitous consequences. Consider for instance the substitution in Figure 6. Both fragments at the bottom of the middle and right representations are of rank 2, but whereas the first allows for the edges to refer back to the reentrant nodes, the second introduces an extra reentrant node, therefore leaving one of the reentrant nodes disconnected. Checking just for rank is therefore not enough; one also needs to check whether a reentrant node that will substitute in a variable reference has already been generated. If not, any fragment of the same rank can be accepted. If such a node already exists, only fragments that do not introduce another reentrant node can be accepted. This constrained decoding strategy is what allows us to always generate well-formed graphs; we integrate this validation step in the decoding algorithm when selecting the candidate fragment.
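Under our reading of this validation step, the check can be sketched as follows; `Fragment` and `NonTerminal` are illustrative stand-ins for the decoder's bookkeeping:

```python
from dataclasses import dataclass

# Ranks must match, and a fragment that introduces a new reentrant node is
# admissible only while some variable reference is still unresolved.
@dataclass
class Fragment:
    rank: int
    introduces_reentrant: bool
    prob: float

@dataclass
class NonTerminal:
    rank: int
    refs: tuple  # variable references, e.g. ("$1", "$2")

def admissible(frag, nt, resolved):
    if frag.rank != nt.rank:
        return False
    if frag.introduces_reentrant:
        return any(r not in resolved for r in nt.refs)
    return True

def constrained_choice(candidates, nt, resolved):
    """Highest-probability admissible fragment; the grammar guarantees one."""
    for frag in sorted(candidates, key=lambda f: -f.prob):
        if admissible(frag, nt, resolved):
            return frag
    raise RuntimeError("no admissible fragment")

nt = NonTerminal(rank=2, refs=("$1", "$2"))
cands = [Fragment(2, True, 0.6), Fragment(2, False, 0.3), Fragment(1, False, 0.1)]
# Both references already resolved: the rank-2 fragment that would introduce
# an extra reentrant node (cf. Figure 6, right) is rejected.
print(constrained_choice(cands, nt, resolved={"$1", "$2"}))  # prob=0.3 fragment
```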
Finally, we integrate these hard constraints in the softmax layer as well. Instead of normalizing the logits across all fragment types with a single softmax operation, we normalize them separately for each rank. The errors are only propagated through the subset of parameters in $\mathbf{W}^f$ and $\mathbf{b}^f$ responsible for the logits within the target rank $r_t$.
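A sketch of this rank-factorized normalization at training time, where `rank_of[i]` gives the rank of fragment type `i`:

```python
import torch
import torch.nn.functional as F

# Logits are normalized only over fragments of the target rank, so the loss
# involves only the rows of W^f and entries of b^f responsible for that rank.
def rank_nll(logits, rank_of, target_frag, target_rank):
    ids = torch.tensor([i for i, r in enumerate(rank_of) if r == target_rank])
    log_probs = F.log_softmax(logits[ids], dim=0)
    pos = (ids == target_frag).nonzero(as_tuple=True)[0]
    return -log_probs[pos].squeeze()

logits = torch.randn(6)            # six fragment types in the grammar
rank_of = [0, 1, 1, 2, 2, 2]       # their ranks
loss = rank_nll(logits, rank_of, target_frag=4, target_rank=2)
```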
# 3.4 Training objective
Our objective is to maximize the log-likelihood of the full graph $P(G|s)$, approximated by the decomposition over each prediction task separately:

$$
\sum_{t} \Big[ \log P(f_{t}) + \log P(\ell_{t}) + \log P(r_{t}) + \log P(s_{t}) + \log P(p_{t}) + \sum_{i} \log P(e_{i}) \Big] \tag{5}
$$
where $f_{t}$ is the fragment; $\ell_t$ is the label; $r_t$ is the (optional) rank of $f_{t}$; $s_t$ and $p_t$ are the sense and presupposition label of terminal label $\ell_t$; and $e_1 \dots e_{|e|}$ are the edge labels of $f_{t}$. To prevent our model from overfitting, rather than directly optimizing the log-likelihoods, we apply label smoothing to each prediction term (Szegedy et al., 2016).
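Eq. (5) with label smoothing can be sketched as follows (logit sizes and targets are dummies; epsilon = 0.1 as in Appendix D):

```python
import torch
from torch import nn

# One label-smoothed cross-entropy term per prediction task, summed per step.
ce = nn.CrossEntropyLoss(label_smoothing=0.1)

def step_loss(terms, edge_terms):
    """terms / edge_terms are (logits, target_index) pairs for one time step:
    fragment, label, rank, sense and presupposition, plus one pair per edge."""
    pairs = list(terms) + list(edge_terms)
    return sum(ce(lg.unsqueeze(0), torch.tensor([t])) for lg, t in pairs)

loss = step_loss(
    terms=[(torch.randn(232), 3),     # fragment f_t
           (torch.randn(3000), 42),   # label l_t
           (torch.randn(5), 2),       # rank r_t
           (torch.randn(70), 1),      # sense s_t
           (torch.randn(2), 0)],      # presupposition p_t
    edge_terms=[(torch.randn(60), 7)])  # edge labels e_1..e_|e|
```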
# 4 Experimental setup

# 4.1 Data
We evaluate our parser on the Parallel Meaning Bank (Abzianidze et al., 2017), a multilingual graphbank where sentences in four languages (English (en), Italian (it), German (de) and Dutch (nl)) are annotated with their semantic representations in the form of Discourse Representation Structures (DRS). We test on v.2.2.0 to compare with previous work, and present the first results on v.3.0 on all four languages. We also present results when training on both gold and silver data, where the latter is $\sim 10$ x larger but contains machine-generated parses, of which only a small fraction have been manually edited. Statistics for both versions of the PMB are reported in Appendix B.

<table><tr><td></td><td># training instances</td><td># fragments +edge label</td><td># fragments -edge label</td><td>avg. rank</td></tr><tr><td>PMB2.2.0-g</td><td>4585</td><td>1196</td><td>232</td><td>1.56</td></tr><tr><td>PMB2.2.0-s</td><td>63960</td><td>17414</td><td>2586</td><td>2.85</td></tr><tr><td>PMB3-g</td><td>6618</td><td>1695</td><td>276</td><td>2.22</td></tr><tr><td>PMB3-s</td><td>94776</td><td>36833</td><td>6251</td><td>3.01</td></tr><tr><td>PMB3-it</td><td>2743</td><td>1827</td><td>378</td><td>2.32</td></tr><tr><td>PMB3-de</td><td>5019</td><td>4025</td><td>843</td><td>2.61</td></tr><tr><td>PMB3-nl</td><td>1238</td><td>1338</td><td>318</td><td>2.29</td></tr></table>

Table 1: Statistics for the grammars extracted from the PMB (g - gold; s - silver).
Our model requires an explicit grammar which we obtain by automatically converting each DAG in the training data into a sequence of productions. This conversion follows the one in FA19 with minor changes; we include details in Appendix C.
Statistics regarding the grammars extracted from the PMB are presented in Table 1, where along with the number of training instances and fragments, we report the average rank, an indication of how many reentrancies (on average) are present in the graphs. RDGs can be large, especially in the case of silver data, where incorrect parses lead to a larger number of extracted fragments and more complex, noisy constructions, as attested by the higher average ranks. More importantly, we show that removing the edge labels from the fragments leads to a drastic reduction in the number of fragments, especially for the silver corpora.
# 4.2 Evaluation metrics
To evaluate our parser, we need to compare its output DRSs to the gold-standard graph structures. For this, we use the Counter tool of Van Noord et al. (2018), which calculates an F-score by searching for the best match between the variables of the predicted and the gold-standard graphs. Counter's search algorithm is similar to the evaluation system SMATCH for AMR parsing (Cai and Knight, 2013).
There are occasional cases where our graph is deemed ill-formed by Counter; we assign these graphs a score of 0. This ill-formedness is, however, not due to the model itself but to specific requirements placed on the output DRS by the Counter script.
<table><tr><td></td><td>P</td><td>R</td><td>F1</td></tr><tr><td>baseline</td><td>80.0</td><td>70.9</td><td>75.2</td></tr><tr><td>+ rank-prediction</td><td>81.0</td><td>72.3</td><td>76.4</td></tr><tr><td>+ constrained-decoding</td><td>80.5</td><td>75.2</td><td>77.8</td></tr><tr><td>+ edge-factorization</td><td>82.5</td><td>78.5</td><td>80.4</td></tr><tr><td>ours-best + silver</td><td>83.8</td><td>80.6</td><td>82.2</td></tr><tr><td>ours-best + filtering</td><td>83.1</td><td>80.5</td><td>81.8</td></tr></table>

Table 2: Ablation results on the dev portion of PMB2.2.0. The top half shows results for models trained on gold data only. The bottom half shows results of models trained on silver+gold data.
# 5 Experimental Results

We first present results of ablation experiments to understand which model configuration performs best (§ 5.1). We then compare our best-performing model with several existing semantic parsers (§ 5.2), and present our model's performance in multilingual settings (§ 5.3).
# 5.1 Ablation experiments
Table 2 shows results for our model in various settings. Our baseline is trained on gold data alone, uses a full grammar, and performs unconstrained decoding, with and without rank prediction. Note that unconstrained decoding can lead to ill-formed graphs. To better understand the effect of this, we compare the performance of the baseline with a model that uses constrained decoding and thus always generates well-formed graphs. We train all our models on a single TitanX GPU v100. We report hyperparameters and other training details in Appendix D.
Our results are different from those of FA19, who show that a baseline model outperforms one with constrained decoding. Not only do we find that constrained decoding outperforms the baseline, but we observe that without it, 26 graphs ($\sim 4\%$) are ill-formed. In addition, our results show that predicting edge labels separately from fragments (edge factorization) leads to a substantial improvement in performance, while also drastically reducing the size of the grammar (as shown in Table 1).
We also train our best-performing model (ours-best) on the silver and gold data combined (+silver). This is to assess whether more data, albeit noisy, results in better performance. However, noisy data can lead to a noisy grammar; to reduce this noise, we experiment with first extracting a grammar from the gold training set, and using it to filter the silver set, where only instances that can be derived using the gold grammar are kept (+filtering). The filtering results in a smaller grammar (232 vs. 2586 fragments), while at the same time sacrificing only a small percentage of training instances ($10\%$).

Figure 6: Example of correct (middle) and wrong (right) substitution of a non-terminal function (left, in blue) during constrained decoding.
van Noord et al. (2019), Liu et al. (2019) and FA19 found that models trained on silver data require additional fine-tuning on gold data alone to achieve the best performance; we also follow this strategy in our experiments. Overall, results show that adding silver data improves performance, and that filtering the input silver data leads only to a slight loss in performance while keeping the size of the grammar small.
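The filtering strategy above can be sketched as follows; `extract_fragments` stands in for the derivation-tree extraction of Appendix C and is not shown here:

```python
# A silver instance is kept only if its graph can be derived with fragments
# already licensed by the grammar extracted from the gold training set.
def filter_silver(silver_data, gold_fragments, extract_fragments):
    kept = []
    for sentence, graph in silver_data:
        try:
            fragments = extract_fragments(graph)   # delexicalized fragments
        except ValueError:                         # no derivation tree found
            continue
        if all(f in gold_fragments for f in fragments):
            kept.append((sentence, graph))
    return kept
```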
# 5.2 Comparison to previous work
We compare our best-performing model against previous work on PMB2.2.0. We first compare the performance of models trained solely on gold data. Besides the DAG-grammar parser of FA19, we compare with the transition-based stackLSTM of Evang (2019), which utilizes a buffer-stack architecture to predict a DRS fragment for each input token using the alignment information in the PMB; our graph parser does not make use of such information and relies solely on attention.
We then compare our best-performing model with two models trained on gold plus silver data. van Noord et al. (2019) is a seq2seq parser that decodes an input sentence into a concatenation of clauses, essentially a flattened version of the boxes in Figure 1. Similar to FA19, their model also uses a wide variety of language-dependent features, including part-of-speech, dependency and CCG tags, while ours relies solely on word embeddings. In this respect, our model is similar to that of Liu et al. (2019), which uses the same architecture as the model of van Noord et al. (2019) but replaces the LSTM encoder with a transformer model, without the use of additional features.

<table><tr><td></td><td>P</td><td>R</td><td>F1</td></tr><tr><td>Fancellu et al. (2019)</td><td>-</td><td>-</td><td>73.4</td></tr><tr><td>Evang (2019)</td><td>-</td><td>-</td><td>74.4</td></tr><tr><td>ours-best</td><td>84.5</td><td>81.3</td><td>82.9</td></tr><tr><td>van Noord et al. (2019)</td><td>-</td><td>-</td><td>86.8</td></tr><tr><td>Liu et al. (2019)</td><td>85.8</td><td>84.5</td><td>85.1</td></tr><tr><td>ours-best + silver</td><td>86.1</td><td>83.6</td><td>84.9</td></tr></table>

Table 3: Comparison with previous work on the test portion of PMB2.2.0. Results in the top half are for models trained on gold data, whereas the bottom half shows results for models trained on silver+gold data.
Results are summarized in Table 3. When trained on gold data alone, our model outperforms previous models by a large margin, without relying on alignment information or extra features besides word embeddings. When trained on silver+gold, we close the performance gap with state-of-the-art models that decode into strings, despite relying solely on multilingual word embeddings.
# 5.3 Multilingual experiments
Table 4 shows the results on languages other than English. In our multilingual experiments, we first train and test monolingual models in each language. In addition, we perform zero-shot experiments by training a model on English and testing it on other languages (cross-lingual). We also take full advantage of the fact that our models rely solely on multilingual word embeddings, and experiment with two other multilingual settings: The bilingual models are trained on data in English plus data in a target language (tested on the target language). The polyglot models combine training data of all four languages (tested on each language). Parameters for all languages in the bilingual and polyglot models are fully shared.

PMB2.2.0

<table><tr><td></td><td>en</td><td>de</td><td>nl</td><td>it</td></tr><tr><td>FA19 (monolingual)</td><td>-</td><td>67.9</td><td>65.8</td><td>75.9</td></tr><tr><td>FA19 (cross-lingual)</td><td>-</td><td>63.5</td><td>65.1</td><td>72.1</td></tr><tr><td>Ours (cross-lingual)</td><td>-</td><td>73.4</td><td>73.9</td><td>76.9</td></tr><tr><td colspan="5">ours-best (various) trained and tested on PMB3</td></tr><tr><td>monolingual</td><td>80</td><td>64.2</td><td>60.9</td><td>71.5</td></tr><tr><td>cross-lingual</td><td>-</td><td>73.2</td><td>74.1</td><td>75.2</td></tr><tr><td>bilingual</td><td>-</td><td>71.8</td><td>76.0</td><td>77.7</td></tr><tr><td>polyglot</td><td>79.8</td><td>72.5</td><td>74.1</td><td>77.9</td></tr></table>

Table 4: Results for the multilingual experiments on the test sets for PMB2.2.0 (top half) and PMB3.0 (bottom half). For the sake of brevity, we report only $\mathrm{F}_1$ scores here, and refer the reader to Table 6 in Appendix E for Precision and Recall values.
FA19 only experiment with a cross-lingual model trained with additional language-dependent features, some of which are available only for a small number of languages (on PMB2.2.0). We therefore compare our cross-lingual models with theirs on PMB2.2.0. We then introduce the first results on PMB3, where we experiment with the other two multilingual settings.
Our results tell a different story from FA19: all of our multilingual models (bilingual, polyglot and cross-lingual) outperform the corresponding monolingual baselines. We hypothesize this is mainly because for languages other than English only small silver training sets are available, and adding the large gold English data can help dramatically with performance. This hypothesis is also reinforced by the fact that a cross-lingual model trained on English data alone can reach a performance comparable to the other two models.
# 6 Conclusions
In this paper, we have introduced a graph parser that can fully harness the power of DAG grammars in a seq2seq architecture. Our approach is efficient, fully multilingual, always guarantees well-formed graphs, and can rely on small grammars, while outperforming previous graph-aware parsers in English, Italian, German and Dutch by a large margin. At the same time, we close the gap between string-based and RDG-based decoding. In the future, we plan to extend this work to other semantic formalisms (e.g. AMR, UCCA) as well as to test on other languages, so as to encourage work in languages other than English.
# Acknowledgments

We thank three anonymous reviewers for their useful comments. Research was conducted at Samsung AI Centre Toronto and funded by Samsung Research, Samsung Electronics Co., Ltd.
# References
Lasha Abzianidze, Johannes Bjerva, Kilian Evang, Hessel Haagsma, Rik Van Noord, Pierre Ludmann, Duc-Duy Nguyen, and Johan Bos. 2017. The parallel meaning bank: Towards a multilingual corpus of translations annotated with compositional meaning representations. In Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics.

Laura Banarescu, Claire Bonial, Shu Cai, Madalina Georgescu, Kira Griffith, Ulf Hermjakob, Kevin Knight, Philipp Koehn, Martha Palmer, and Nathan Schneider. 2013. Abstract meaning representation for sembanking. In Proceedings of the 7th Linguistic Annotation Workshop and Interoperability with Discourse, pages 178-186.

Valerio Basile and Johan Bos. 2013. Aligning formal meaning representations with surface strings for wide-coverage text generation. In Proceedings of the 14th European Workshop on Natural Language Generation, pages 1-9, Sofia, Bulgaria. Association for Computational Linguistics.

Henrik Björklund, Frank Drewes, and Petter Ericson. 2016. Between a rock and a hard place: Uniform parsing for hyperedge replacement DAG grammars. In Proceedings of the International Conference on Language and Automata Theory and Applications, pages 521-532. Springer.

Shu Cai and Kevin Knight. 2013. Smatch: An evaluation metric for semantic feature structures. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics, pages 748-752.

Yufei Chen, Weiwei Sun, and Xiaojun Wan. 2018. Accurate SHRG-based semantic parsing. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 408-418.

David Chiang, Jacob Andreas, Daniel Bauer, Karl Moritz Hermann, Bevan Jones, and Kevin Knight. 2013. Parsing graphs with hyperedge replacement grammars. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 924-932.

Ann Copestake, Dan Flickinger, Carl Pollard, and Ivan A. Sag. 2005. Minimal recursion semantics: An introduction. Research on Language and Computation, 3(2-3):281-332.
Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of NAACL-HLT 2019, pages 4171-4186.

Lucia Donatelli, Meaghan Fowlie, Jonas Groschwitz, Alexander Koller, Matthias Lindemann, Mario Mina, and Pia Weißenhorn. 2019. Saarland at MRP 2019: Compositional parsing across all graphbanks. In Proceedings of the Shared Task on Cross-Framework Meaning Representation Parsing at the 2019 Conference on Natural Language Learning, pages 66-75.

Kilian Evang. 2019. Transition-based DRS parsing using stack-LSTMs. In Proceedings of the IWCS Shared Task on Semantic Parsing, Gothenburg, Sweden. Association for Computational Linguistics.

Federico Fancellu, Sorcha Gilroy, Adam Lopez, and Mirella Lapata. 2019. Semantic graph parsing with recurrent neural network DAG grammars. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing, pages 2769-2778.

Sorcha Gilroy. 2019. Probabilistic graph formalisms for meaning representations. Ph.D. thesis, University of Edinburgh.

Michael Wayne Goodman. 2020. Penman: An open-source library and tool for AMR graphs. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: System Demonstrations, pages 312-319.

Jonas Groschwitz, Alexander Koller, and Christoph Teichmann. 2015. Graph parsing with s-graph grammars. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 1481-1490.

Jonas Groschwitz, Matthias Lindemann, Meaghan Fowlie, Mark Johnson, and Alexander Koller. 2018. AMR dependency parsing with a typed semantic algebra. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers).

Annegret Habel. 1992. Hyperedge replacement: grammars and languages, volume 643. Springer Science & Business Media.

Jeremy Howard and Sebastian Ruder. 2018. Universal language model fine-tuning for text classification. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 328-339, Melbourne, Australia. Association for Computational Linguistics.

Hans Kamp. 1981. A theory of truth and semantic representation. Formal Semantics: The Essential Readings, pages 189-222.
Percy Liang, Michael I. Jordan, and Dan Klein. 2013. Learning dependency-based compositional semantics. Computational Linguistics, 39(2):389-446.

Jiangming Liu, Shay B. Cohen, and Mirella Lapata. 2018. Discourse representation structure parsing. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 429-439.

Jiangming Liu, Shay B. Cohen, and Mirella Lapata. 2019. Discourse representation structure parsing with recurrent neural networks and the transformer model. In Proceedings of the IWCS Shared Task on Semantic Parsing.

Rik van Noord, Lasha Abzianidze, Antonio Toral, and Johan Bos. 2018. Exploring neural methods for parsing discourse representation structures. Transactions of the Association for Computational Linguistics, 6:619-633.

Rik van Noord and Johan Bos. 2017. Neural semantic parsing by character-based translation: Experiments with abstract meaning representations. Computational Linguistics in the Netherlands (CLIN).

Rik van Noord, Antonio Toral, and Johan Bos. 2019. Linguistic information in neural semantic parsing with multiple encoders. In Proceedings of the 13th International Conference on Computational Semantics (Short Papers), pages 24-31.

Xiaochang Peng, Linfeng Song, and Daniel Gildea. 2015. A synchronous hyperedge replacement grammar based approach for AMR parsing. In Proceedings of the Nineteenth Conference on Computational Natural Language Learning, pages 32-41.

Mike Schuster and Kaisuke Nakajima. 2012. Japanese and Korean voice search. In Proceedings of the 2012 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 5149-5152. IEEE.

Chi Sun, Xipeng Qiu, Yige Xu, and Xuanjing Huang. 2019. How to fine-tune BERT for text classification? In Proceedings of the China National Conference on Chinese Computational Linguistics, pages 194-206. Springer.

Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jon Shlens, and Zbigniew Wojna. 2016. Rethinking the inception architecture for computer vision. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2818-2826.

Rik Van Noord, Lasha Abzianidze, Hessel Haagsma, and Johan Bos. 2018. Evaluating scoped meaning representations. In Proceedings of the 11th International Conference on Language Resources and Evaluation (LREC).

Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, and Jamie Brew. 2019. HuggingFace's Transformers: State-of-the-art natural language processing. arXiv, abs/1910.03771.

Kelvin Xu, Jimmy Ba, Ryan Kiros, Kyunghyun Cho, Aaron Courville, Ruslan Salakhudinov, Rich Zemel, and Yoshua Bengio. 2015. Show, attend and tell: Neural image caption generation with visual attention. In Proceedings of the 32nd International Conference on Machine Learning, volume 37 of Proceedings of Machine Learning Research, pages 2048-2057, Lille, France. PMLR.

Sheng Zhang, Xutai Ma, Kevin Duh, and Benjamin Van Durme. 2019a. AMR parsing as sequence-to-graph transduction. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), Florence, Italy. Association for Computational Linguistics.

Sheng Zhang, Xutai Ma, Kevin Duh, and Benjamin Van Durme. 2019b. Broad-coverage semantic parsing as transduction. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP).
# A System architecture

An illustration of our system architecture is shown in Figure 7.
# B PMB - data statistics
<table><tr><td></td><td>train</td><td>dev</td><td>test</td></tr><tr><td>PMB2.2.0-g</td><td>4597 (4585)</td><td>682</td><td>650</td></tr><tr><td>PMB2.2.0-s</td><td>67965 (63960)</td><td>-</td><td>-</td></tr><tr><td>PMB3-g</td><td>6620 (6618)</td><td>885</td><td>898</td></tr><tr><td>PMB3-s</td><td>97598 (94776)</td><td>-</td><td>-</td></tr><tr><td>PMB3-it</td><td>2772 (2743)*</td><td>515</td><td>547</td></tr><tr><td>PMB3-de</td><td>5250 (5019)*</td><td>417</td><td>403</td></tr><tr><td>PMB-nl</td><td>1301 (1238)*</td><td>529</td><td>483</td></tr></table>

Table 5: Data statistics for the PMB v.2.2.0 and 3.0 (g - gold; s - silver). Numbers in parentheses are the instances used during training, i.e. those for which we were able to extract a derivation tree. \*: training instances for languages other than English are silver, whereas dev and test are gold.
# C DAG-grammar extraction

Our grammar extraction consists of three steps:
Preprocess the DRS. First, we treat all constants as lexical elements and bind them to a variable $c$. For instance, in Figure 1 we bind 'speaker' to a variable $c_{1}$ and change the relations $\mathrm{AGENT}(e_1, \text{'speaker'})$ and $\mathrm{AGENT}(e_2, \text{'speaker'})$ into $\mathrm{AGENT}(e_1, c_1)$ and $\mathrm{AGENT}(e_2, c_1)$, respectively. Second, we deal with multiple lexical elements that map to the same variable (e.g. $\mathrm{cat}(x_1) \wedge \mathrm{entity}(x_1)$, where the second predicate specifies the 'nature' of the first) by renaming the second variable as $i$ and creating a dummy relation $\mathrm{Of}$ that maps from the first to the second. Finally, we get rid of relations that generate cycles. We found 25 cycles in the PMB, all related to the same phenomenon, where the relations 'Role' and 'Of' have inverted source and target (e.g. person(x1) - Role - mother(x4), mother(x4) - Of - person(x1)). We remove cyclicity by merging the two relations into one edge label. All these changes are reverted before evaluation.
Converting the DRS into a DAG. We convert all main boxes, lexical predicates and constants (now bound to a variable) to nodes, whereas binary relations between predicates and boxes are treated as edges. For each box, we identify a root variable (if any) and attach this as a child to the box-node with an edge :DRS. A root variable is defined as a variable belonging to a box that is *not* at the receiving end of any binary predicates; in Figure 1, these are $e_1$ and $e_2$ for $b_2$ and $b_3$ respectively. We then follow the binary relations to expand the graph. In doing so, we also incorporate presuppositional boxes in the graph (i.e. $b_4$ in Figure 1). Each of these boxes contains predicates that are presupposed in context (usually definite descriptions like 'the door'). To link presupposed boxes to the main boxes (i.e. to get a fully connected DAG) we assign a (boolean) presupposition feature to the root variable of the presupposed box (this feature is marked with the superscript $p$ in Figure 2). Any descendant predicates of this root variable will be considered as part of the presupposed DRS. During post-processing, when we need to reconstruct the DRS out of a DAG, we rebuild the presupposed box around variables for which presupposition is predicted as 'True', and their descendants.
Note that Basile and Bos (2013) proposed a similar conversion to generate Discourse Representation Graphs (DRG), exemplified in Figure 8 using our working example. We argue that our representation is more compact in that: 1) we ignore 'in' edges, by which each variable is explicitly marked as part of a box by means of a dedicated edge; this is possible since each box (the square nodes) has a main predicate and all its descendants belong to the box; 2) we treat binary predicates (e.g. AGENT) as edge labels and not nodes; 3) we remove presupposition boxes (in Figure 8, the subgraph rooted in a P-labelled edge) and assign a (boolean) presupposition variable to the presupposed predicates.
Convert the DAGs into derivation trees. DAGs are converted into derivation trees in two passes following the algorithm in Björklund et al. (2016), which we summarize here; the reader is referred to the original paper for further details. The algorithm consists of two steps: first, for each node $n$ we traverse the graph post-order and store information on the reentrant nodes in the subgraph rooted at $n$. To be more precise, each outgoing edge $e_i$ from $n$ defines a subgraph $s_i$ along which we extract a list of all the reentrant nodes we encounter. This list also includes the node itself, if reentrant.
We then traverse the tree depth-first to collect the grammar fragments and build the derivation tree. Each node contains information of its variable (and type), lexical predicate and features as well as a list of the labels on outgoing edges that we plug in the fragments. In order to add variable
|
| 312 |
+
|
| 313 |
+

|
| 314 |
+
Figure 7: Overview of our architecture, following the description is § 3. Our encoder (on the left) computes multilingual word-embeddings using MBERT which then feed into a 2-layers BiLSTM. At the time step $t$ , a 2 layers decoder LSTM (on the right) reconstructs a graph $G$ by predicting fragment $f_{t}$ and terminal label $l_{t}$ . Additionally, parsing on PMB requires to predict for each label $l_{t}$ a sense tag $s_{t}$ and presupposition information $p_{t}$ (a boolean flag). To predict $f_{t}$ we use the hidden state of the decoder first layer (in blue) along with context vector $c_{t}^{f}$ and information about the parent fragment $u_{t}$ (yellow edges). All other predictions are done using the hidden state of the decoder second layer (in red) along a separate context vector $c_{t}^{l}$ . Both context vectors are computed using soft attention over the input representations (top left). Fragments predicted are used to substitute the leftmost non-terminal in the partial graph $G$ (in pink), as shown at the top for $G_{2}\ldots G_{5}$ . For $G_{1}$ the first fragment predicted initializes the graph (this corresponds to substituting the start symbol $S$ ). The edge labels in the fragments above are replaced with placeholders $e_{1}\ldots e_{|e|}$ to display how edge factorization works. We assume here, for brevity, that $G_{5}$ is our final output graph and show the prediction of two edges that substitute in place of the placeholders (box at the bottom). For edge prediction, we use a bundle of features collected during decoding, namely the parent and children fragment embedding $f_{t}$ , the second layer hidden state (in red) and the context vector $c^{l}$ at time $t$ .
|
| 315 |
+
|
| 316 |
+
<table><tr><td rowspan="2"></td><td colspan="3">en</td><td colspan="3">de</td><td colspan="3">nl</td><td colspan="3">it</td></tr><tr><td>P</td><td>R</td><td>F</td><td>P</td><td>R</td><td>F</td><td>P</td><td>R</td><td>F</td><td>P</td><td>R</td><td>F</td></tr><tr><td>monolingual</td><td>81.6</td><td>78.4</td><td>80</td><td>64.5</td><td>64</td><td>64.2</td><td>62.6</td><td>59.2</td><td>60.9</td><td>72.4</td><td>70.6</td><td>71.5</td></tr><tr><td>cross-lingual</td><td>-</td><td>-</td><td>-</td><td>72.8</td><td>73.6</td><td>73.2</td><td>73.4</td><td>74.9</td><td>74.1</td><td>74.2</td><td>76.2</td><td>75.2</td></tr><tr><td>bilingual</td><td>-</td><td>-</td><td>-</td><td>72</td><td>71.5</td><td>71.8</td><td>76.7</td><td>75.3</td><td>76</td><td>76.8</td><td>78.6</td><td>77.7</td></tr><tr><td>polyglot</td><td>81</td><td>78.8</td><td>79.8</td><td>72.2</td><td>72.9</td><td>72.5</td><td>74.3</td><td>73.8</td><td>74.1</td><td>78.2</td><td>77.5</td><td>77.9</td></tr></table>
|
| 317 |
+
|
| 318 |
+

|
| 319 |
+
Figure 8: The DRS of Figure 2 expressed as a Discourse Representation Graph (DRG).
|
| 320 |
+
|
| 321 |
+
references, if any, we need to know whether any reentrant nodes are shared across the subgraphs $s_i \ldots s_{|e|}$. If so, these become variable references. If the node $n$ itself is reentrant, we flag it with * so that we know its variable name can substitute a variable reference.
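For concreteness, here is a minimal Python sketch of the reentrancy bookkeeping described above; it is not the authors' code, and the adjacency-dict representation of the DAG is our assumption.

```python
from collections import defaultdict

def collect_reentrancy_info(graph, root):
    """graph: dict mapping each node to its list of children (a DAG).
    Returns, for every node, one set of reentrant nodes per outgoing edge."""
    indeg = defaultdict(int)
    for node in graph:
        for child in graph[node]:
            indeg[child] += 1
    reentrant = {n for n, d in indeg.items() if d > 1}

    info = {}
    def visit(n):
        # Post-order: first gather the reentrant nodes below each outgoing edge.
        per_edge = [visit(child) for child in graph.get(n, [])]
        info[n] = per_edge
        found = set().union(*per_edge) if per_edge else set()
        if n in reentrant:
            found |= {n}  # the list also includes the node itself, if reentrant
        return found

    visit(root)
    return info, reentrant
```

Variable references then correspond to reentrant nodes appearing in more than one of a node's per-edge sets; note that this sketch revisits shared subgraphs, which a memoized version would avoid.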
|
| 322 |
+
|
| 323 |
+
# D Implementation Details
|
| 324 |
+
|
| 325 |
+
We use the pre-trained uncased multilingual BERT base model from Wolf et al. (2019). All models trained on English data, monolingual or multilingual, share the same hyper-parameter settings. Languages other than English in the PMB v3.0 have less training data, especially Dutch and Italian. Hence, we reduce the model capacity across the board and increase dropout to avoid over-fitting. Hyper-parameter settings are shown in Table 7.
|
| 326 |
+
|
| 327 |
+
We found fine-tuning the BERT model necessary to achieve good performance. Following Sun et al. (2019) and Howard and Ruder (2018), we experimented with different fine-tuning strategies, all applied after model performance plateaued:
|
| 328 |
+
|
| 329 |
+
1. setting a constant learning rate for the BERT layers
|
| 330 |
+
2. gradually unfreezing BERT layer by layer with a decaying learning rate
|
| 331 |
+
|
| 332 |
+
3. slanted-triangular learning rate scheduling following Howard and Ruder (2018).
|
| 333 |
+
|
| 334 |
+
We concluded that strategy 1 works best for our task, with a fine-tuning learning rate of 2e-5 for English and a smaller learning rate of 1e-5 for the other languages.
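As an illustration of strategy 1, a PyTorch optimizer can assign the constant fine-tuning learning rate to the BERT parameters while the rest of the model keeps the learning rate from Table 7. This is a sketch, not the authors' code; `head` below is a hypothetical stand-in for the task-specific layers.

```python
import torch
from transformers import BertModel

bert = BertModel.from_pretrained("bert-base-multilingual-uncased")
head = torch.nn.Linear(768, 512)  # placeholder for the parser layers

optimizer = torch.optim.Adam(
    [
        {"params": bert.parameters(), "lr": 2e-5},  # fine-tuning LR (en); 1e-5 for de/nl/it
        {"params": head.parameters(), "lr": 1e-3},  # main model LR from Table 7
    ],
    weight_decay=1e-4,
)
```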
|
| 335 |
+
|
| 336 |
+
Table 6: Results for the multilingual experiments on PMB v3.0 (test set). Monolingual results (top half) are compared with different combinations of multilingual training data (bottom half).
|
| 337 |
+
|
| 338 |
+
<table><tr><td colspan="3">Model Parameters</td></tr><tr><td>BERT</td><td colspan="2">768</td></tr><tr><td>Num of Encoder Layer</td><td colspan="2">2</td></tr><tr><td>Encoder</td><td>en de/nl/it</td><td>2@512 1@512</td></tr><tr><td>Fragment/Relation/Label</td><td>en de/nl/it</td><td>100 75</td></tr><tr><td>Edge Prediction Layer</td><td>en de/nl/it</td><td>100 75</td></tr><tr><td>Decoder</td><td>en de/nl/it</td><td>1024 512</td></tr><tr><td colspan="3">Optimization Parameters</td></tr><tr><td>Optimizer</td><td colspan="2">ADAM</td></tr><tr><td>Learning Rate</td><td colspan="2">0.001</td></tr><tr><td>Weight Decay</td><td colspan="2">1e-4</td></tr><tr><td>Gradient Clipping</td><td colspan="2">5</td></tr><tr><td>Label Smoothing ε</td><td colspan="2">0.1</td></tr><tr><td>Bert Finetune LR</td><td>en de/nl/it</td><td>2e-5 1e-5</td></tr><tr><td>Dropout</td><td>en de/nl/it</td><td>0.33 0.5</td></tr></table>
|
| 339 |
+
|
| 340 |
+
Table 7: Hyper-parameter Settings
|
| 341 |
+
|
| 342 |
+
# E Multilingual experiments - full results
|
| 343 |
+
|
| 344 |
+
All results for the multilingual experiments, including precision and recall, are shown in Table 6.
|
accuratepolyglotsemanticparsingwithdaggrammars/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a79b646291ea0fb0e5863268667dd8ed11ff44020d0af5fed3810be6d1011399
|
| 3 |
+
size 518540
|
accuratepolyglotsemanticparsingwithdaggrammars/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2985c86999a77801c1f9e62032a87a19c28b9f4ce5636da6f6b6e886d46ef2cc
|
| 3 |
+
size 449131
|
acompareaggregatetransformerforunderstandingdocumentgroundeddialogue/5cc63071-3d79-4af8-bd73-f6002698aecf_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3ac68d515b1bd34977c702886be8c5e3111bd04d190d598b3479b7792472a4e8
|
| 3 |
+
size 71117
|
acompareaggregatetransformerforunderstandingdocumentgroundeddialogue/5cc63071-3d79-4af8-bd73-f6002698aecf_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:782dc5dc6cf9f9edeebc79e4aaf2bab83e0512f9ec9a5f61e22d87ee93d8c70e
|
| 3 |
+
size 86018
|
acompareaggregatetransformerforunderstandingdocumentgroundeddialogue/5cc63071-3d79-4af8-bd73-f6002698aecf_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:bd802c6edc4edff151eaf8094638df77ed2279bbf6de3a4a0010a6565b48cb71
|
| 3 |
+
size 524574
|
acompareaggregatetransformerforunderstandingdocumentgroundeddialogue/full.md
ADDED
|
@@ -0,0 +1,308 @@
|
| 1 |
+
# A Compare Aggregate Transformer for Understanding Document-grounded Dialogue
|
| 2 |
+
|
| 3 |
+
Longxuan Ma, Weinan Zhang, Runxin Sun, Ting Liu
|
| 4 |
+
|
| 5 |
+
Research Center for Social Computing and Information Retrieval
|
| 6 |
+
|
| 7 |
+
Harbin Institute of Technology, Harbin, Heilongjiang, China
|
| 8 |
+
|
| 9 |
+
{lxma,wnzhang,rxsun,tliu}@ir.hit.edu.cn
|
| 10 |
+
|
| 11 |
+
# Abstract
|
| 12 |
+
|
| 13 |
+
Unstructured documents serving as external knowledge for dialogues help to generate more informative responses. Previous research focused on knowledge selection (KS) in the document given the dialogue. However, dialogue history that is not related to the current dialogue may introduce noise into the KS process. In this paper, we propose a Compare Aggregate Transformer (CAT) to jointly denoise the dialogue context and aggregate the document information for response generation. We design two different comparison mechanisms to reduce noise (before and during decoding). In addition, we propose two metrics for evaluating document utilization efficiency based on word overlap. Experimental results on the CMU.DoG dataset show that the proposed CAT model outperforms the state-of-the-art approach and strong baselines.
|
| 14 |
+
|
| 15 |
+
# 1 Introduction
|
| 16 |
+
|
| 17 |
+
Dialogue systems (DS) attract great attention from industry and academia because of their wide application prospects. Sequence-to-sequence models (Seq2Seq) (Sutskever et al., 2014; Serban et al., 2016) have proven to be an effective framework for the DS task. However, one problem of Seq2Seq models is that they tend to generate generic responses that provide deficient information (Li et al., 2016; Ghazvininejad et al., 2018). Previous researchers proposed different methods to alleviate this issue. One way is to focus on models' ability to extract information from conversations. Li et al. (2016) introduced Maximum Mutual Information (MMI) as the objective function for generating diverse responses. Serban et al. (2017) proposed a latent variable model to capture posterior information of the golden response. Zhao et al. (2017) used conditional variational autoencoders to learn discourse-level diversity for neural dialogue models. The
|
| 18 |
+
|
| 19 |
+
# Document:
|
| 20 |
+
|
| 21 |
+
Movie Name: The Shape of Water. Year: 2017. Director: Guillermo del Toro. Genre: Fantasy, Drama. Cast: Sally Hawkins as Elisa Esposito, a mute cleaner who works at a secret government laboratory. ... Critical Response: one of del Toro's most stunningly successful works ...
|
| 22 |
+
|
| 23 |
+
# Dialogue:
|
| 24 |
+
|
| 25 |
+
S1: I thought The Shape of Water was one of Del Toro's best works. What about you?
|
| 26 |
+
S2: Yes, his style really extended the story.
|
| 27 |
+
S1: I agree. He has a way with fantasy elements that really helped this story be truly beautiful. It has a very high rating on rotten tomatoes, too.
|
| 28 |
+
S2: Sally Hawkins acting was phenomenally expressive. Didn't feel her character was mentally handicapped.
|
| 29 |
+
S1: The characterization of her as such was definitely off the mark.
|
| 30 |
+
|
| 31 |
+
Figure 1: One DGD example in the CMU.DoG dataset. S1/S2 mean Speaker-1/Speaker-2, respectively.
|
| 32 |
+
|
| 33 |
+
other way is to introduce external knowledge, either unstructured knowledge texts (Ghazvininejad et al., 2018; Ye et al., 2019; Dinan et al., 2019) or structured knowledge triples (Liu et al., 2018; Young et al., 2018; Zhou et al., 2018a), to help open-domain conversation generation by producing responses conditioned on selected knowledge.
|
| 34 |
+
|
| 35 |
+
The Document-grounded Dialogue (DGD) (Zhou et al., 2018b; Zhao et al., 2019; Li et al., 2019) is a new way to use external knowledge. It establishes a conversation mode in which relevant information can be obtained from the given document. One example of DGD is presented in Figure 1: two interlocutors talk about the given document and freely reference its text segments during the conversation.
|
| 36 |
+
|
| 37 |
+
To address this task, two main challenges need to be considered in a DGD model: 1) determining which of the historical conversations are related to the current conversation, and 2) using the current conversation and the related conversation history to select proper document information and to generate
|
| 38 |
+
|
| 39 |
+
an informative response. Previous work (Arora et al., 2019; Zhao et al., 2019; Qin et al., 2019; Tian et al., 2020; Ren et al., 2019) generally focused on selecting knowledge with all the conversations. However, the relationship between historical conversations and the current conversation has not been studied enough. For example, in Figure 1, the italic utterance from S2, "Yes, his style really extended the story.", is related to the dialogue history, while the bold utterance from S2, "Sally Hawkins acting was phenomenally expressive. Didn't feel her character was mentally handicapped.", has no direct relationship with the historical utterances. When this sentence serves as the last utterance, the dialogue history is not conducive to generating a response.
|
| 40 |
+
|
| 41 |
+
In this paper, we propose a novel Transformer-based (Vaswani et al., 2017) model, named Compare Aggregate Transformer (CAT), for understanding dialogues and generating informative responses in the DGD. Previous research (Sankar et al., 2019) has shown that the last utterance is the most important guidance for response generation in the multi-turn setting. Hence, we divide the dialogue into the last utterance and the dialogue history, then measure the effectiveness of the dialogue history. If the last utterance and the dialogue history are related, we need to consider all the conversations to filter the document information. Otherwise, the existence of the dialogue history amounts to the introduction of noise, and its impact should be eliminated conditionally. For this purpose, on one side, CAT filters the document information with the last utterance; on the other side, CAT uses the last utterance to guide the dialogue history and employs the guiding result to filter the given document. We judge the importance of the dialogue history by comparing the two parts, then aggregate the filtered document information to generate the response. Experimental results show that our model can generate more relevant and informative responses than competitive baselines. When the dialogue history is less relevant to the last utterance, our model proves even more effective. The main contributions of this paper are:
|
| 42 |
+
|
| 43 |
+
(1) We propose a compare aggregate method to determine the relationship between the historical dialogues and the last utterance. Experiments show that our model outperforms strong baselines on the CMU.DoG dataset.
|
| 44 |
+
(2) We propose two new metrics to evaluate the
|
| 45 |
+
|
| 46 |
+
document knowledge utilization in the DGD. They are both based on N-gram overlap among the generated response, the dialogue, and the document.
|
| 47 |
+
|
| 48 |
+
# 2 Related Work
|
| 49 |
+
|
| 50 |
+
The DGD maintains a dialogue pattern where external knowledge can be obtained from the given document. Recently, several DGD datasets (Zhou et al., 2018b; Moghe et al., 2018; Qin et al., 2019; Gopalakrishnan et al., 2019) have been released for exploiting unstructured document information in conversations.
|
| 51 |
+
|
| 52 |
+
Models trying to address the DGD task can be classified into two categories based on how they encode dialogues: one is parallel modeling and the other is incremental modeling. For the first category, Moghe et al. (2018) used a generation-based model that learns to copy information from the background knowledge and a span prediction model that predicts the appropriate response span in the background knowledge. Liu et al. (2019) claimed to be the first to unify knowledge triples and long texts as a graph, and then employed reinforcement learning in a flexible multi-hop knowledge graph reasoning process. To improve the use of background knowledge, Zhang et al. (2019) first adopted the encoder state of the utterance history context as a query to select the most relevant knowledge, then employed a modified version of BiDAF (Seo et al., 2017) to point out the most relevant token positions of the background sequence. Meng et al. (2019) used a decoding switcher to predict the probabilities of executing reference decoding or generation decoding. Some other researchers (Zhao et al., 2019; Arora et al., 2019; Qin et al., 2019; Meng et al., 2019; Ren et al., 2019) also followed this parallel encoding method. For the second category, Kim et al. (2020) proposed a sequential latent knowledge selection model for knowledge-grounded dialogue. Li et al. (2019) designed an incremental transformer to encode multi-turn utterances along with knowledge in the related document; meanwhile, a two-pass deliberation decoder (Xia et al., 2017) was used for response generation. However, the relationship between the dialogue history and the last utterance has not been well studied. In this paper, we propose a compare aggregate method to investigate this problem. It should be pointed out that when the target response changes the topic, the task is to detect whether the topic has ended and to
|
| 53 |
+
|
| 54 |
+

|
| 55 |
+
Figure 2: The architecture of the CAT model. "utter" is short for utterance. "doc" is short for document.
|
| 56 |
+
|
| 57 |
+

|
| 58 |
+
|
| 59 |
+

|
| 60 |
+
|
| 61 |
+
initiate a new topic (Akasaki and Kaji, 2019). We do not study the conversation initiation problem in this paper, although we may take it up as future work.
|
| 62 |
+
|
| 63 |
+
# 3 The Proposed CAT Model
|
| 64 |
+
|
| 65 |
+
# 3.1 Problem Statement
|
| 66 |
+
|
| 67 |
+
The inputs of the CAT model are the given document $\mathbf{D} = (D_{1}, D_{2}, \dots, D_{d})$ with $d$ words, dialogue history $\mathbf{H} = (H_{1}, H_{2}, \dots, H_{h})$ with $h$ words and the last utterance $\mathbf{L} = (L_{1}, L_{2}, \dots, L_{l})$ with $l$ words. The task is to generate the response $\mathbf{R} = (R_{1}, R_{2}, \dots, R_{r})$ with $r$ tokens with probability:
|
| 68 |
+
|
| 69 |
+
$$
|
| 70 |
+
P(\mathbf{R} \mid \mathbf{H}, \mathbf{L}, \mathbf{D}; \Theta) = \prod_{i=1}^{r} P(R_i \mid \mathbf{H}, \mathbf{L}, \mathbf{D}, \mathbf{R}_{<i}; \Theta), \tag{1}
|
| 71 |
+
$$
|
| 72 |
+
|
| 73 |
+
where $\mathbf{R}_{< i} = (R_1, R_2, \dots, R_{i-1})$ and $\Theta$ denotes the model's parameters.
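As a minimal sketch of this factorization under teacher forcing (the decoder producing `logits` is left abstract; this is not the paper's code):

```python
import torch
import torch.nn.functional as F

def response_log_prob(logits, response_ids):
    """logits: (batch, r, vocab) from any conditional decoder over (H, L, D);
    response_ids: (batch, r) gold token ids.
    Returns log P(R | H, L, D) = sum_i log P(R_i | ..., R_<i) per example."""
    log_probs = F.log_softmax(logits, dim=-1)
    token_lp = log_probs.gather(-1, response_ids.unsqueeze(-1)).squeeze(-1)
    return token_lp.sum(dim=-1)
```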
|
| 74 |
+
|
| 75 |
+
# 3.2 Encoder
|
| 76 |
+
|
| 77 |
+
The structure of the CAT model is shown in Figure 2. The hidden dimension of the CAT model is $\widehat{h}$. We use the Transformer structure (Vaswani et al., 2017). The self-attention is calculated as follows:
|
| 78 |
+
|
| 79 |
+
$$
|
| 80 |
+
\operatorname{Attention}(\mathbf{Q}, \mathbf{K}, \mathbf{V}) = \operatorname{softmax}\left(\frac{\mathbf{Q}\mathbf{K}^{T}}{\sqrt{d_k}}\right)\mathbf{V}, \tag{2}
|
| 81 |
+
$$
|
| 82 |
+
|
| 83 |
+
where $\mathbf{Q},\mathbf{K}$, and $\mathbf{V}$ are the query, the key, and the value, respectively; $d_{k}$ is the dimension of $\mathbf{Q}$ and $\mathbf{K}$. The encoder and the decoder stack $N$ ($N = 3$ in our work) identical layers of multi-head attention (MAtt):
|
| 84 |
+
|
| 85 |
+
$$
|
| 86 |
+
\operatorname{MAtt}(\mathbf{Q}, \mathbf{K}, \mathbf{V}) = [\mathbf{A}_1, \dots, \mathbf{A}_n]\mathbf{W}^{O}, \tag{3}
|
| 87 |
+
$$
|
| 88 |
+
|
| 89 |
+
$$
|
| 90 |
+
\mathbf{A}_i = \operatorname{Attention}\left(\mathbf{Q}\mathbf{W}_i^{Q}, \mathbf{K}\mathbf{W}_i^{K}, \mathbf{V}\mathbf{W}_i^{V}\right), \tag{4}
|
| 91 |
+
$$
|
| 92 |
+
|
| 93 |
+
where $\mathbf{W}_i^Q, \mathbf{W}_i^K, \mathbf{W}_i^V (i = 1, \dots, n)$ and $\mathbf{W}^O$ are learnable parameters.
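A compact PyTorch sketch of Eqs. (2)-(4) follows; the head count and dimensions are illustrative only (the head count must divide the model dimension), not the paper's exact configuration.

```python
import torch
import torch.nn.functional as F

def attention(Q, K, V):
    # Eq. (2): scaled dot-product attention.
    d_k = Q.size(-1)
    scores = Q @ K.transpose(-2, -1) / (d_k ** 0.5)
    return F.softmax(scores, dim=-1) @ V

class MAtt(torch.nn.Module):
    # Eqs. (3)-(4): n parallel heads, concatenated and projected by W^O.
    def __init__(self, d_model=300, n_heads=4):
        super().__init__()
        assert d_model % n_heads == 0
        self.h, self.d_k = n_heads, d_model // n_heads
        self.wq = torch.nn.Linear(d_model, d_model)  # stacks all W_i^Q
        self.wk = torch.nn.Linear(d_model, d_model)  # stacks all W_i^K
        self.wv = torch.nn.Linear(d_model, d_model)  # stacks all W_i^V
        self.wo = torch.nn.Linear(d_model, d_model)  # W^O

    def forward(self, Q, K, V):
        B = Q.size(0)
        split = lambda x: x.view(B, -1, self.h, self.d_k).transpose(1, 2)
        A = attention(split(self.wq(Q)), split(self.wk(K)), split(self.wv(V)))
        A = A.transpose(1, 2).contiguous().view(B, -1, self.h * self.d_k)
        return self.wo(A)
```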
|
| 94 |
+
|
| 95 |
+
The encoder of CAT consists of two branches, as shown in Figure 2 (a). The left branch learns the information selected by the dialogue history $\mathbf{H}$; the right branch learns the information selected by the last utterance $\mathbf{L}$. After the self-attention process, we get $\mathbf{H}_s = \mathrm{MAtt}(\mathbf{H},\mathbf{H},\mathbf{H})$ and $\mathbf{L}_s = \mathrm{MAtt}(\mathbf{L},\mathbf{L},\mathbf{L})$. Then we employ $\mathbf{L}_s$ to guide $\mathbf{H}$: $\mathbf{H}^1 = \mathrm{MAtt}(\mathbf{L}_s,\mathbf{H},\mathbf{H})$, where $\mathbf{H}^1$ is the hidden state at the first layer. We then adopt $\mathbf{H}^1$ to select knowledge from the document $\mathbf{D}$: $\mathbf{D}^1 = \mathrm{FF}(\mathrm{MAtt}(\mathbf{H}^1,\mathbf{D},\mathbf{D}))$, where FF is the feed-forward process. In the second layer, $\mathbf{D}^1$ is the input: $\mathbf{D}_s^1 = \mathrm{MAtt}(\mathbf{D}^1,\mathbf{D}^1,\mathbf{D}^1)$, $\mathbf{H}^2 = \mathrm{MAtt}(\mathbf{D}_s^1,\mathbf{H},\mathbf{H})$, $\mathbf{D}^2 = \mathrm{FF}(\mathrm{MAtt}(\mathbf{H}^2,\mathbf{D},\mathbf{D}))$. After $N$ layers, we obtain the information $\mathbf{D}^n$ selected by $\mathbf{H}$. In the right branch, we use $\mathbf{L}_s$ to filter $\mathbf{D}$; $\widetilde{\mathbf{D}}^n$ is the information selected by $\mathbf{L}$.
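A sketch of one left-branch layer, reusing `MAtt` from above; residual connections and layer normalization, which a standard Transformer layer would include, are omitted here, and the feed-forward shape is our assumption.

```python
class LeftBranchLayer(torch.nn.Module):
    def __init__(self, d_model=300):
        super().__init__()
        self.utter_att = MAtt(d_model)  # the query guides the dialogue history H
        self.doc_att = MAtt(d_model)    # H^k then selects from the document D
        self.ff = torch.nn.Sequential(  # FF block (shape assumed)
            torch.nn.Linear(d_model, 4 * d_model),
            torch.nn.ReLU(),
            torch.nn.Linear(4 * d_model, d_model),
        )

    def forward(self, query, H, D):
        # Layer 1 uses query = L_s; layer k > 1 uses the self-attended D^{k-1}.
        Hk = self.utter_att(query, H, H)        # H^k = MAtt(query, H, H)
        return self.ff(self.doc_att(Hk, D, D))  # D^k = FF(MAtt(H^k, D, D))
```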
|
| 96 |
+
|
| 97 |
+
# 3.3 Comparison Aggregate
|
| 98 |
+
|
| 99 |
+
As demonstrated by Sankar et al. (2019), the last utterance plays a fundamental role in response generation. We need to preserve the document information filtered by $\mathbf{L}$ and determine how much of the information selected by $\mathbf{H}$ is needed. We propose two different compare aggregate methods: one is concatenation before decoding and the other is attended comparison in the decoder.
|
| 100 |
+
|
| 101 |
+
# 3.3.1 Concatenation
|
| 102 |
+
|
| 103 |
+
We apply average pooling to $\mathbf{H}_s$ and $\mathbf{L}_s$ to get their vector representations $\mathbf{H}_{sa}$ and $\mathbf{L}_{sa} \in \mathbb{R}^{\widehat{h} \times 1}$, respectively. The concatenation method calculates a relevance score $\alpha$ to determine the importance of $\mathbf{D}^n$ as follows:
|
| 104 |
+
|
| 105 |
+
$$
|
| 106 |
+
\alpha = \tanh\left(\mathbf{H}_{sa}\mathbf{W}^{H} + \mathbf{L}_{sa}\mathbf{W}^{L}\right), \tag{5}
|
| 107 |
+
$$
|
| 108 |
+
|
| 109 |
+
$$
|
| 110 |
+
\mathbf{D}_{\text{final}} = \left[\operatorname{sigmoid}\left(\mathbf{W}^{\alpha}\alpha\right) * \mathbf{D}^{n}; \widetilde{\mathbf{D}}^{n}\right], \tag{6}
|
| 111 |
+
$$
|
| 112 |
+
|
| 113 |
+
where $\mathbf{W}^H, \mathbf{W}^L \in \mathbb{R}^{\widehat{h} \times \widehat{h}}$ and $\mathbf{W}^\alpha \in \mathbb{R}^{1 \times \widehat{h}}$ are learnable parameters, $[\mathbf{X};\mathbf{Y}]$ is the concatenation of $\mathbf{X}$ and $\mathbf{Y}$ in the sentence dimension, and $*$ is element-wise multiplication. Note that since $\mathbf{D}^n$ is guided by $\mathbf{H}$, the concatenation method performs a second-level comparison between $\mathbf{H}$ and $\mathbf{L}$ and then transfers the topic-aware $\mathbf{D}_{final}$ to the two-pass Deliberation Decoder (DD) (Xia et al., 2017). The structure of the DD is shown in Figure 2 (b). The first pass takes $\mathbf{L}$ and $\mathbf{D}_{final}$ as inputs and learns to generate a contextually coherent response $\mathbf{R}^1$. The second pass takes $\mathbf{R}^1$ and the document $\mathbf{D}$ as inputs and learns to inject document knowledge. The DD aggregates document, conversation, and topic information to generate the final response $\mathbf{R}^2$. The loss comes from both the first and the second passes:
|
| 114 |
+
|
| 115 |
+
$$
|
| 116 |
+
L = -\sum_{m=1}^{M}\sum_{i=1}^{r}\left(\log P\left(R_i^{1}\right) + \log P\left(R_i^{2}\right)\right), \tag{7}
|
| 117 |
+
$$
|
| 118 |
+
|
| 119 |
+
where $M$ is the total number of training examples; $R_{i}^{1}$ and $R_{i}^{2}$ are the $i$-th words generated by the first and second decoder pass, respectively.
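A sketch of Eqs. (5)-(7) in the same style as the blocks above (the batch handling and the placement of the sigmoid gate are our reading of the equations, not the authors' code):

```python
class ConcatCompare(torch.nn.Module):
    def __init__(self, d_model=300):
        super().__init__()
        self.wh = torch.nn.Linear(d_model, d_model, bias=False)  # W^H
        self.wl = torch.nn.Linear(d_model, d_model, bias=False)  # W^L
        self.wa = torch.nn.Linear(d_model, 1, bias=False)        # W^alpha

    def forward(self, Hs, Ls, Dn, Dn_tilde):
        Hsa, Lsa = Hs.mean(dim=1), Ls.mean(dim=1)        # average pooling
        alpha = torch.tanh(self.wh(Hsa) + self.wl(Lsa))  # Eq. (5)
        gate = torch.sigmoid(self.wa(alpha)).unsqueeze(1)
        return torch.cat([gate * Dn, Dn_tilde], dim=1)   # Eq. (6)

def deliberation_loss(log_p1, log_p2, gold):
    # Eq. (7): both decoder passes are trained against the same gold response;
    # log_p1/log_p2 are assumed log-probabilities of shape (batch, r, vocab).
    nll1 = F.nll_loss(log_p1.transpose(1, 2), gold, reduction="sum")
    nll2 = F.nll_loss(log_p2.transpose(1, 2), gold, reduction="sum")
    return nll1 + nll2
```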
|
| 120 |
+
|
| 121 |
+
# 3.3.2 Attended Comparison
|
| 122 |
+
|
| 123 |
+
We employ an Enhanced Decoder (Zheng and Zhou, 2019) to perform the attended comparison. The structure of our Enhanced Decoder is illustrated in Figure 2 (c). It accepts $\mathbf{D}^n$, $\widetilde{\mathbf{D}}^n$ and the response $\mathbf{R}$ as inputs and applies a different way to compare and aggregate. The merge attention computes weights across all inputs:
|
| 124 |
+
|
| 125 |
+
$$
|
| 126 |
+
\mathbf{P} = \left[\mathbf{R}; \mathbf{D}^{n}; \widetilde{\mathbf{D}}^{n}\right]\mathbf{W}_{P}, \tag{8}
|
| 127 |
+
$$
|
| 128 |
+
|
| 129 |
+
$$
|
| 130 |
+
\mathbf{V}_{\text{merge}} = P_{R}\mathbf{R} + P_{D}\mathbf{D}^{n} + P_{\widetilde{D}}\widetilde{\mathbf{D}}^{n}, \tag{9}
|
| 131 |
+
$$
|
| 132 |
+
|
| 133 |
+
where $\mathbf{W}_{P}$ is a learnable parameter matrix whose output dimension is 3. $P_{R}, P_{D}$ and $P_{\widetilde{D}}$ are the softmax
|
| 134 |
+
|
| 135 |
+
results of $\mathbf{P}$. $\mathbf{V}_{\text{merge}}$ and $\mathbf{L}$ are used for the next utterance attention, as shown in Figure 2 (c). The output of the Enhanced Decoder is connected to the second pass of the DD, and we define this new structure as the Enhanced Deliberation Decoder (EDD). The loss is the same as Eq. (7).
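A sketch of the merge attention (Eqs. 8-9); since the paper leaves the exact shapes implicit, we assume the three inputs have already been aligned to the same length (e.g., as per-step context vectors over decoding positions).

```python
class MergeAttention(torch.nn.Module):
    def __init__(self, d_model=300):
        super().__init__()
        self.wp = torch.nn.Linear(3 * d_model, 3, bias=False)  # W_P

    def forward(self, R, Dn, Dn_tilde):
        # Eq. (8): a 3-way softmax weight per position over the three sources.
        P = torch.softmax(self.wp(torch.cat([R, Dn, Dn_tilde], dim=-1)), dim=-1)
        p_r, p_d, p_dt = P.unbind(dim=-1)
        # Eq. (9): weighted merge of the three sources.
        return (p_r.unsqueeze(-1) * R
                + p_d.unsqueeze(-1) * Dn
                + p_dt.unsqueeze(-1) * Dn_tilde)
```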
|
| 136 |
+
|
| 137 |
+
# 4 Experiments
|
| 138 |
+
|
| 139 |
+
# 4.1 Dataset
|
| 140 |
+
|
| 141 |
+
We evaluate our model on the CMU.DoG (Zhou et al., 2018b) dataset. It contains 4112 dialogues based on 120 documents. Each document contains 4 sections, such as the movie introduction and scenes. A related section is given for every few consecutive utterances; however, the conversations are not constrained to the given section. In our setting, we use the full document (all 4 sections) as external knowledge. The average length of the documents is around 800 words. We concatenate consecutive utterances of the same speaker into one utterance. During training, we remove the first two or three rounds of greeting sentences. Each sample contains one document, two or more historical utterances, one last utterance, and one golden response. For testing, we use two different versions of the test set. The first follows the processing of the training data; we name it the Reduced version. The second is constructed by comparing the document sections on which the original conversation is based: we preserve the examples in which the dialogue history and the last utterance are grounded in different document sections (for example, the dialogue history is based on section 2 while the last utterance and response are based on section 3). We name it the Sampled version; it is used to test our models' ability to comprehend topic transfer in conversations. The data statistics are shown in Table 1. Please refer to Zhou et al. (2018b) for more details. It is worth noting that the Sampled version does not represent the proportion of all conversational topic transfers, but it exposes this problem better than the Reduced version. We also tested our method on the Holl-E (Moghe et al., 2018) dataset; since the processing of that dataset and the experimental conclusions obtained are similar to CMU.DoG, we do not present them in this paper.
|
| 142 |
+
|
| 143 |
+
# 4.2 Baselines
|
| 144 |
+
|
| 145 |
+
We evaluated several competitive baselines.
|
| 146 |
+
|
| 147 |
+
<table><tr><td>Dataset</td><td>U.Num(train / dev / test)</td><td>W/Utter</td></tr><tr><td>Original</td><td>72922 / 3626 / 11577</td><td>18.6</td></tr><tr><td>Reduced</td><td>66332 / 3269 / 10502</td><td>19.7</td></tr><tr><td>Sampled</td><td>66332 / 3269 / 1317</td><td>19.6</td></tr></table>
|
| 148 |
+
|
| 149 |
+
Table 1: Statistics of the CMU.DoG dataset. "U.Num" denotes the number of utterances, "W/Utter" the average words per utterance.
|
| 150 |
+
|
| 151 |
+
# 4.2.1 RNN-based models
|
| 152 |
+
|
| 153 |
+
VHRED: A Hierarchical Latent Variable Encoder-Decoder Model (Serban et al., 2017), which introduces a global (semantic-level) latent variable $Z$ to address the problem that HRED (Serban et al., 2016) has difficulty generating meaningful and high-quality replies. $Z$ is calculated from the encoder RNN outputs and the context RNN outputs. The latent variable $Z$ contains some high-level semantic information, which encourages the model to extract abstract semantic concepts. Please refer to Serban et al. (2017) for more details. We use $Z$ to capture the topic transfer in conversations and test three different settings. In the first setting, we do not employ the document knowledge and only use the dialogue as input to generate the response; it is recorded as VHRED(-k). In the second, we use the same encoder RNN with shared parameters to learn the representations of the document and the utterance, then concatenate their final hidden states as the input of the context RNN; it is denoted VHRED(c). In the third, we use word-level dot attention (Luong et al., 2015) to get the document-aware utterance representation and use it as the input of the context RNN; it is termed VHRED(a).
|
| 154 |
+
|
| 155 |
+
# 4.2.2 Transformer-based models
|
| 156 |
+
|
| 157 |
+
T-DD/T-EDD: Both use the Transformer as the encoder. The inputs are the concatenation of the dialogue and the document. These two models encode the dialogue in parallel without detecting topic transfer. T-DD uses the Deliberation Decoder (DD); T-EDD uses the Enhanced Deliberation Decoder (EDD).
|
| 158 |
+
|
| 159 |
+
ITDD (Li et al., 2019): It uses an Incremental Transformer Encoder (ITE) and a two-pass Deliberation Decoder (DD). The Incremental Transformer uses multi-head attention to incorporate document sections and context into each utterance's encoding process. ITDD incrementally models dialogues without detecting topic transitions.
|
| 160 |
+
|
| 161 |
+
# 4.3 Evaluation Metrics
|
| 162 |
+
|
| 163 |
+
Automatic Evaluation: We employ perplexity (PPL) (Bengio et al., 2000), BLEU (Papineni et al., 2002) and ROUGE (Lin, 2004). The PPL of the gold response is measured; lower perplexity indicates better performance. BLEU measures the n-gram overlap between a generated response and the gold response. Since there is only one reference for each response, BLEU scores are extremely low. ROUGE measures the n-gram overlap based on the recall rate. Since the conversations are constrained by the background material, ROUGE is reliable.
|
| 164 |
+
|
| 165 |
+
We also introduce two metrics to automatically evaluate Knowledge Utilization (KU), both based on $N$-gram overlaps. We denote a document, its conversations and the generated response in the test set as $(\mathbf{D},\mathbf{C},\mathbf{R})$. The $N$-gram sets of the elements of $(\mathbf{D},\mathbf{C},\mathbf{R})$ are termed $\mathbf{G}_d^N,\mathbf{G}_c^N$ and $\mathbf{G}_r^N$, respectively. The set of overlapping $N$-grams of $\mathbf{G}_d^N$ and $\mathbf{G}_r^N$ is recorded as $\mathbf{G}_{dr}^N$. Tuples which are in $\mathbf{G}_{dr}^N$ but not in $\mathbf{G}_c^N$ form $\mathbf{G}_{dr - c}^N$. Then $\mathbf{KU} = \text{len}(\mathbf{G}_{dr - c}^N) / \text{len}(\mathbf{G}_{dr}^N)$ reflects how many $N$-grams in the document are used in the generated replies, where $\text{len}(\mathbf{G})$ is the number of tuples in $\mathbf{G}$. The larger the KU, the more $N$-grams of the document are utilized. Since low-frequency tuples may be more representative of text features, we define the reciprocal of the frequency of each tuple $k$ in $\mathbf{G}$ as $\mathbf{R}_k^G$, which represents the importance of a tuple. Then the Quality of Knowledge Utilization (QKU) is calculated as:
|
| 166 |
+
|
| 167 |
+
$$
|
| 168 |
+
\mathbf{QKU} = \sum_{(\mathbf{D}, \mathbf{C}, \mathbf{R})} \frac{\sum_{k} \mathbf{R}_{k}^{G_r}}{\sum_{k} \mathbf{R}_{k}^{G_d}}, \quad k \in \mathbf{G}_{dr-c}. \tag{10}
|
| 169 |
+
$$
|
| 170 |
+
|
| 171 |
+
If $\mathbf{R}_k^{G_r}$ is more important in the response and $\mathbf{R}_k^{G_d}$ is less important in the document, the QKU becomes larger. Hence a smaller QKU means a higher quality of the used document knowledge.
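Both metrics are straightforward to compute from token lists. A sketch for a single $(\mathbf{D}, \mathbf{C}, \mathbf{R})$ triple follows (tokenization details are our assumption; the corpus-level QKU sums this ratio over all triples as in Eq. (10)):

```python
from collections import Counter

def ngrams(tokens, n):
    return [tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1)]

def ku_qku(doc, conv, resp, n=3):
    g_d, g_c, g_r = (Counter(ngrams(t, n)) for t in (doc, conv, resp))
    g_dr = set(g_d) & set(g_r)      # document n-grams reused in the reply
    g_dr_c = g_dr - set(g_c)        # ... and not already in the dialogue
    ku = len(g_dr_c) / len(g_dr) if g_dr else 0.0
    # Importance of tuple k in G is the reciprocal of its frequency in G.
    num = sum(1.0 / g_r[k] for k in g_dr_c)
    den = sum(1.0 / g_d[k] for k in g_dr_c)
    qku = num / den if den else 0.0
    return ku, qku
```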
|
| 172 |
+
|
| 173 |
+
Human Evaluation: We randomly sampled 100 conversations from the Sampled test set and obtained 800 responses from the eight models. We recruited 5 graduate students as judges, who score each response with access to the previous dialogue and the document. We use three metrics: Fluency, Coherence, and Informativeness. Fluency measures whether the response is a human-like utterance. Coherence measures whether the response is coherent with the dialogue context. Informativeness measures whether the response contains relevant and correct information from the document. Responses are scored from 1 to
|
| 174 |
+
|
| 175 |
+
<table><tr><td>Model</td><td>PPL</td><td>BLEU (%)</td><td>ROUGE-L</td><td>KU-2/3 (%)</td><td>QKU-2/3</td></tr><tr><td>VHRED(-k)</td><td>97.3◇ (99.3)*</td><td>0.49* (0.49)*</td><td>7.80* (7.82)*</td><td>-/- (-/-)</td><td>-/- (-/-)</td></tr><tr><td>VHRED(c)</td><td>80.2◇ (85.4)*</td><td>0.79* (0.77)*</td><td>8.64* (8.63)*</td><td>12.0/27.0◇ (12.1/27.6)◇</td><td>3.36/2.82◇ (3.35/2.80)◇</td></tr><tr><td>VHRED(a)</td><td>77.2◇ (78.5)*</td><td>0.84* (0.80)*</td><td>8.98* (8.99)*</td><td>13.7/31.7◇ (13.1/31.3)*</td><td>3.23/2.72* (3.23/2.72)*</td></tr><tr><td>T-DD</td><td>18.2* (20.5)*</td><td>0.90* (0.89)*</td><td>9.23* (9.24)*</td><td>8.0/23.1* (8.0/23.0)*</td><td>2.55/1.94* (2.55/1.95)*</td></tr><tr><td>T-EDD</td><td>18.2* (20.3)*</td><td>0.91* (0.90)*</td><td>9.35* (9.36)*</td><td>8.3/23.5* (8.1/23.4)*</td><td>2.45/1.91* (2.45/1.92)*</td></tr><tr><td>ITDD</td><td>16.2* (18.7)*</td><td>1.01* (0.99)*</td><td>10.12◇ (10.10)*</td><td>9.0/24.5* (9.1/24.4)*</td><td>2.18/1.84* (2.15/1.82)*</td></tr><tr><td>CAT-EDD</td><td>16.0* (18.2)*</td><td>1.14* (1.14)*</td><td>11.10* (11.12)*</td><td>9.5/24.8* (9.7/24.9)*</td><td>2.12/1.77* (2.11/1.76)*</td></tr><tr><td>CAT-DD</td><td>15.2 (16.1)</td><td>1.22 (1.21)</td><td>11.22 (11.22)</td><td>11.0/26.5 (11.1/26.4)</td><td>2.08/1.64 (2.05/1.62)</td></tr></table>
|
| 176 |
+
|
| 177 |
+
Table 2: Automatic evaluations on the CMU.DoG dataset. $\cdot (\cdot)$ means Reduced (Sampled) test data. We take CAT-DD as the base model for the significance tests; $\diamond$ and $*$ stand for $p < 0.05$ and $p < 0.01$, respectively.
|
| 178 |
+
|
| 179 |
+
5 (1: very bad, 2: bad, 3: acceptable, 4: good, 5: very good). Overall inter-rater agreement measured by Fleiss' Kappa is 0.32 ("fair").
|
| 180 |
+
|
| 181 |
+
# 4.4 Experimental Setup
|
| 182 |
+
|
| 183 |
+
We use OpenNMT-py (Klein et al., 2017) as the code framework. For all models, pre-trained 300-dimensional word embeddings (Mikolov et al., 2013) are shared by the dialogue, the document, and the generated responses, and the hidden size is 300. For the RNN-based models, a 3-layer bidirectional GRU and a 3-layer GRU are applied for the encoder and decoder, respectively. For the Transformer-based models, the number of layers of both the encoder and the decoder is set to 3, the number of heads in multi-head attention is 8, and the filter size is 2048. We use Adam ($\alpha = 0.001$, $\beta_{1} = 0.9$, $\beta_{2} = 0.999$, and $\epsilon = 10^{-8}$) (Kingma and Ba, 2015) for optimization. The beam size is set to 5 in the decoder. We truncate the document to 800 words and each dialogue utterance to 40 words. All models are trained on a TITAN X (Pascal) GPU. The average training time per epoch is around 40 minutes for the Transformer-based models and around 20 minutes for the RNN-based models.
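For reference, the stated optimization setting maps directly onto PyTorch's Adam (a sketch; `model` is a hypothetical placeholder for any of the models above):

```python
import torch

model = torch.nn.Linear(300, 300)  # hypothetical placeholder module
optimizer = torch.optim.Adam(model.parameters(), lr=0.001,
                             betas=(0.9, 0.999), eps=1e-8)
```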
|
| 184 |
+
|
| 185 |
+
# 5 Analysis
|
| 186 |
+
|
| 187 |
+
# 5.1 Experimental Results
|
| 188 |
+
|
| 189 |
+
Table 2 shows the automatic evaluations for all models on the Reduced (Sampled) dataset. The dialogue history is 2 rounds. We only present ROUGE-L, as ROUGE-1/2 show the same trend as ROUGE-L. The experiments show that the range of variation of KU-2 (8.0-13.7) is smaller than that of KU-3 (23.1-31.7) on the Reduced data, indicating that KU-3 better reflects the amount of knowledge used than KU-2.
|
| 190 |
+
|
| 191 |
+
Among the RNN-based models, VHRED(-k) gets the worst PPL/BLEU/ROUGE, which reveals the importance of injecting document knowledge
|
| 192 |
+
|
| 193 |
+
in the DGD task. We did not calculate the KU/QKU of VHRED(-k) since the model does not use document knowledge. VHRED(a) gets better PPL/BLEU/ROUGE/KU/QKU than VHRED(c), which means that finer-grained extraction of document information benefits response generation more.
|
| 194 |
+
|
| 195 |
+
Among the Transformer-based models, the ITDD model gets better PPL/BLEU/ROUGE-L/KU/QKU than the T-DD model, which means the incremental encoding method is stronger than parallel encoding. The CAT-DD and CAT-EDD models achieve better performance than the T-DD and T-EDD models, respectively, indicating that our compare aggregate method helps to understand the dialogue. The CAT-EDD model outperforms the ITDD model on all metrics, which indicates that our CAT module automatically learns the topic transfer between the conversation history and the last utterance, as we expected. CAT-EDD does not perform as well as CAT-DD, which shows that it is necessary to set up an independent mechanism to learn topic transfer, rather than learning it automatically via attention in the decoder.
|
| 196 |
+
|
| 197 |
+
Compared with the RNN-based models, the Transformer-based models achieve better PPL/BLEU/ROUGE, which shows that the latter are better at converging to the ground truth. VHRED(c) and VHRED(a) get better KU but worse QKU than the Transformer-based models, which means the latent variable models increase the diversity of replies and use more document tuples, but their ability to extract distinctive tuples is not as good as that of the Transformer-based ones.
|
| 198 |
+
|
| 199 |
+
Table 3 shows the manual evaluations for all models on the Reduced (Sampled) dataset. The CAT-DD model gets the highest scores on Fluency/Coherence/Informativeness. When experimenting with the Sampled test set, we can see that the advantages of our models become greater than
|
| 200 |
+
|
| 201 |
+
<table><tr><td>Model</td><td>Flu.</td><td>Coh.</td><td>Inf.</td></tr><tr><td>VHRED(-k)</td><td>3.71 (3.72)</td><td>2.82 (2.72)</td><td>3.01 (2.82)</td></tr><tr><td>VHRED(c)</td><td>3.73 (3.82)</td><td>3.04 (3.11)</td><td>3.03 (3.05)</td></tr><tr><td>VHRED(a)</td><td>3.84 (3.77)</td><td>3.11 (3.14)</td><td>3.22 (3.06)</td></tr><tr><td>T-DD</td><td>3.84 (3.82)</td><td>3.03 (3.06)</td><td>3.03 (3.06)</td></tr><tr><td>T-EDD</td><td>3.84 (3.83)</td><td>3.02 (3.08)</td><td>3.05 (3.05)</td></tr><tr><td>ITDD</td><td>3.90 (3.91)</td><td>3.11 (3.12)</td><td>3.43 (3.42)</td></tr><tr><td>CAT-EDD</td><td>4.02 (3.93)</td><td>3.12 (3.33)</td><td>3.33 (3.41)</td></tr><tr><td>CAT-DD</td><td>4.09 (4.09)</td><td>3.39 (3.43)</td><td>3.44 (3.61)</td></tr></table>
|
| 202 |
+
|
| 203 |
+
Table 3: Manual evaluations on the CMU.DoG dataset. Flu./Coh./Inf. mean Fluency/Coherence/Informativeness; $\cdot (\cdot)$ means Reduced (Sampled) test data.
|
| 204 |
+
|
| 205 |
+
<table><tr><td>Models</td><td>PPL</td><td>BLEU</td><td>KU-2(%) / QKU-2</td></tr><tr><td>CAT-DD</td><td>16.1</td><td>1.21</td><td>11.1 / 2.05</td></tr><tr><td>w/o-left</td><td>19.8*</td><td>0.90*</td><td>8.2* / 2.56*</td></tr><tr><td>w/o-(5,6)</td><td>18.7*</td><td>0.93*</td><td>9.1* / 2.48◇</td></tr><tr><td>w/o-(G)</td><td>18.2*</td><td>0.96*</td><td>9.2◇ / 2.46*</td></tr></table>
|
| 206 |
+
|
| 207 |
+
Table 4: Ablation study on the Sampled test set. We take CAT-DD as the base model for the significance tests; $\diamond$ and $*$ stand for $p < 0.05$ and $p < 0.01$, respectively. "w/o" means "without".
|
| 208 |
+
|
| 209 |
+
those on the Reduced version, in both automatic and manual evaluations. Our model thus shows greater advantages on data with more topic transfer.
|
| 210 |
+
|
| 211 |
+
# 5.2 Ablation Study
|
| 212 |
+
|
| 213 |
+
Table 4 presents the ablation study of the CAT-DD model. w/o-left removes the left branch, so the model degenerates to T-DD, which takes the last utterance and the document as inputs. All automatic evaluation scores drop significantly, indicating that the dialogue history cannot simply be ignored. w/o-(5,6) is a model without Eqs. (5) and (6), which is equivalent to simply concatenating the outputs of the left and right encoder branches. The results show that the model's ability to distinguish conversational topic transfer is weakened. w/o-(G) removes the utter-attention in the left branch, which means we do not use $\mathbf{L}$ to guide $\mathbf{H}$; the left branch then has the same structure as the right branch, with $\mathbf{H}$ as its input. The performance declines, which indicates that the guiding process is useful. The significance tests (two-tailed Student's t-test) on PPL/BLEU/KU-2/QKU-2 confirm the effectiveness of each component.
|
| 214 |
+
|
| 215 |
+
# 5.3 History Round Study
|
| 216 |
+
|
| 217 |
+
We use the CAT-DD model and the Sampled test set to study the influence of the number of historical dialogue rounds. For example, setting the dialogue history to 0 means we use only the last utterance, and CAT-DD becomes the w/o-left model in the
|
| 218 |
+
|
| 219 |
+

|
| 220 |
+
|
| 221 |
+

|
| 222 |
+
Figure 3: The effect of dialogue history rounds on VHRED(a)/ITDD/CAT-DD models. The abscissa represents the historical dialogue rounds. The ordinate represents the BLEU/KU-3/QKU-3 values.
|
| 223 |
+
|
| 224 |
+
ablation study. Setting the dialogue history to $N$ means we use $N$ rounds of dialogue history as the input of the left branch. We set the conversation history to $0/1/2/3/4$ to test the VHRED(a)/ITDD/CAT-DD models. Figure 3 shows the trends of BLEU/KU-3/QKU-3. The top panel shows the BLEU trend: CAT-DD reaches its maximum at 2 rounds, and further increasing the number of rounds does not significantly improve generation. In the middle panel, as the dialogue history grows from 0 to 2 rounds, VHRED(a) and CAT-DD show a visible improvement in KU-3, which shows that the information contained in the historical dialogue can be identified and affects the extraction of document information. The ITDD model is not as sensitive as the others on KU-3, indicating that the incremental encoding structure pays more attention to the information in the last utterance. The bottom panel shows the trend of QKU-3: as the dialogue history increases, ITDD stays stable while VHRED(a) and CAT-DD show a declining trend, which again indicates that VHRED(a) and CAT-DD are more sensitive to the dialogue history.
|
| 225 |
+
|
| 226 |
+
# 5.4 History Importance Study
|
| 227 |
+
|
| 228 |
+
Figure 4 shows the average $\operatorname{sigmoid}(\mathbf{W}^{\alpha}\alpha)$ value in the CAT-DD model over the Reduced/Sampled test sets and the validation set. A higher value means a stronger correlation between the last utterance and the dialogue history. We can see that
|
| 229 |
+
|
| 230 |
+

|
| 231 |
+
Figure 4: The rating of dialogue history in the CAT-DD model with Reduced and Sampled test set. The abscissa represents the dialogue rounds and the ordinate represents the correlation score in the model.
|
| 232 |
+
Figure 5: Case study in the CMU.DoG Sampled Dataset. S1/S2 means Speaker-1/Speaker-2, respectively. (w/o-(5,6)) and (w/o-(G)) are models in the ablation study.
|
| 233 |
+
|
| 234 |
+
<table><tr><td>Document:<br>... sally hawkins as elisa esposito, a mute cleaner who works at a secret government laboratory. michael shannon as colonel richard strickland ... rating rotten tomatoes: 92% The shape of water is a 2017 american fantasy film ... it stars sally hawkins, michael shannon, richard jenkins, doug jones, michael stuhlbarg, and octavia spencer ...</td></tr><tr><td>Dialogue history:<br>S1: I wonder if it's a government creation or something captured from the wild. i would assume the wild.<br>S2: It was captured for governmental experiments.<br>The last utterance:<br>S1: Is it a big name cast?</td></tr><tr><td>Ground truth:<br>S2: Sally hawkins played the role of the mute cleaner, michael shannon played the role of colonel richard strickland.</td></tr><tr><td>Generated responses:<br>VHRED(a): it has rating rotten tomatoes: 92%.<br>TDD: i am not sure about it.<br>ITDD: yes, sally hawkins as elisa esposito.<br>CAT-DD: sally hawkins, michael shannon, richard jenkins, doug jones, michael stuhlbarg, and octavia spencer.<br>(w/o-(5,6)): yes, sally hawkins works at a secret government laboratory.<br>(w/o-(G)): it is a 2017 american fantasy film.</td></tr></table>
|
| 248 |
+
|
| 249 |
+
on the Reduced test set and the validation set, the relevance score is higher than on the Sampled data, which shows that the last utterance and the dialogue history are less related in the latter. Our model captures this change and performs better on the Sampled data than on the Reduced data. When the number of historical rounds increases from 1 to 2, the relevance score drops noticeably for all data sets, which means the added dialogue history introduces more unrelated information. When the historical conversations increase from 2 to 6, none of the data sets change significantly, indicating that adding more dialogue rounds does not improve the model's ability to recognize topic change.
|
| 250 |
+
|
| 251 |
+
# 5.5 Case Study
|
| 252 |
+
|
| 253 |
+
In Figure 5, we randomly select an example from the Sampled test set for a case study. The document,
|
| 254 |
+
|
| 255 |
+
the dialogue history, the last utterance, and the ground truth are presented. We can observe that the last utterance is irrelevant to the dialogue history. The generated responses of the different models are listed below. The VHRED(a) and CAT-DD (w/o-(G)) models misunderstand the dialogue and use the wrong document knowledge. TDD gives a generic reply. The ITDD model answers correctly but without enough document information. The CAT-DD (w/o-(5,6)) model gives a response influenced by the irrelevant dialogue history, exactly the noise we want to eliminate. Only the full CAT-DD model generates a reasonable reply and uses the correct document knowledge, which means it correctly understands the dialogue.
|
| 256 |
+
|
| 257 |
+
# 6 Conclusion
|
| 258 |
+
|
| 259 |
+
We propose the Compare Aggregate method to understand Document-grounded Dialogue (DGD). The dialogue is divided into the last utterance and the dialogue history, and the relationship between the two parts is analyzed to denoise the dialogue context and aggregate the document information for response generation. Experiments show that our model outperforms previous work in both automatic and manual evaluations: it better understands the dialogue context and selects proper document information for response generation. We also propose Knowledge Utilization (KU) and Quality of Knowledge Utilization (QKU), which measure the quantity and quality of the imported external knowledge, respectively. In the future, we will further study the topic transition problem and the knowledge injection problem in the DGD.
|
| 260 |
+
|
| 261 |
+
# Acknowledgments
|
| 262 |
+
|
| 263 |
+
This work is supported by the National Natural Science Foundation of China under Grants No. 62076081, No. 61772153 and No. 61936010.
|
| 264 |
+
|
| 265 |
+
# References
|
| 266 |
+
|
| 267 |
+
Satoshi Akasaki and Nobuhiro Kaji. 2019. Conversation initiation by diverse news contents introduction. In *NAACL-HLT (1)*, pages 3988-3998. Association for Computational Linguistics.
|
| 268 |
+
Siddhartha Arora, Mitesh M. Khapra, and Harish G. Ramaswamy. 2019. On knowledge distillation from complex networks for response prediction. In NAACL-HLT (1), pages 3813-3822. Association for Computational Linguistics.
|
| 269 |
+
|
| 270 |
+
Yoshua Bengio, Réjean Ducharme, and Pascal Vincent. 2000. A neural probabilistic language model. In NIPS, pages 932-938. MIT Press.
|
| 271 |
+
Emily Dinan, Stephen Roller, Kurt Shuster, Angela Fan, Michael Auli, and Jason Weston. 2019. Wizard of wikipedia: Knowledge-powered conversational agents. In ICLR (Poster). OpenReview.net.
|
| 272 |
+
Marjan Ghazvininejad, Chris Brockett, Ming-Wei Chang, Bill Dolan, Jianfeng Gao, Wen-tau Yih, and Michel Galley. 2018. A knowledge-grounded neural conversation model. In AAAI, pages 5110-5117. AAAI Press.
|
| 273 |
+
Karthik Gopalakrishnan, Behnam Hedayatnia, Qinlang Chen, Anna Gottardi, Sanjeev Kwatra, Anu Venkatesh, Raefer Gabriel, Dilek Hakkani-Tur, and Amazon Alexa AI. 2019. Topical-chat: Towards knowledge-grounded open-domain conversations. Proc. Interspeech 2019, pages 1891-1895.
|
| 274 |
+
Byeongchang Kim, Jaewoo Ahn, and Gunhee Kim. 2020. Sequential latent knowledge selection for knowledge-grounded dialogue. CoRR, abs/2002.07510.
|
| 275 |
+
Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings.
|
| 276 |
+
Guillaume Klein, Yoon Kim, Yuntian Deng, Jean Senellart, and Alexander Rush. 2017. OpenNMT: Open-source toolkit for neural machine translation. In Proceedings of ACL 2017, System Demonstrations, pages 67-72, Vancouver, Canada. Association for Computational Linguistics.
|
| 277 |
+
Jiwei Li, Michel Galley, Chris Brockett, Jianfeng Gao, and Bill Dolan. 2016. A diversity-promoting objective function for neural conversation models. In HLT-NAACL, pages 110-119. The Association for Computational Linguistics.
|
| 278 |
+
Zekang Li, Cheng Niu, Fandong Meng, Yang Feng, Qian Li, and Jie Zhou. 2019. Incremental transformer with deliberation decoder for document grounded conversations. In ACL (1), pages 12-21. Association for Computational Linguistics.
|
| 279 |
+
Chin-Yew Lin. 2004. Rouge: A package for automatic evaluation of summaries. In Text summarization branches out, pages 74-81.
|
| 280 |
+
Shuman Liu, Hongshen Chen, Zhaochun Ren, Yang Feng, Qun Liu, and Dawei Yin. 2018. Knowledge diffusion for neural dialogue generation. In ACL (1), pages 1489-1498. Association for Computational Linguistics.
|
| 281 |
+
Zhibin Liu, Zheng-Yu Niu, Hua Wu, and Haifeng Wang. 2019. Knowledge aware conversation generation with reasoning on augmented graph. CoRR, abs/1903.10245.
|
| 282 |
+
|
| 283 |
+
Thang Luong, Hieu Pham, and Christopher D. Manning. 2015. Effective approaches to attention-based neural machine translation. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, EMNLP 2015, Lisbon, Portugal, September 17-21, 2015, pages 1412-1421. The Association for Computational Linguistics.
|
| 284 |
+
Chuan Meng, Pengjie Ren, Zhumin Chen, Christof Monz, Jun Ma, and Maarten de Rijke. 2019. Refnet: A reference-aware network for background based conversation. CoRR, abs/1908.06449.
|
| 285 |
+
Tomas Mikolov, Ilya Sutskever, Kai Chen, Gregory S. Corrado, and Jeffrey Dean. 2013. Distributed representations of words and phrases and their compositionality. In Advances in Neural Information Processing Systems 26: 27th Annual Conference on Neural Information Processing Systems 2013. Proceedings of a meeting held December 5-8, 2013, Lake Tahoe, Nevada, United States, pages 3111-3119.
|
| 286 |
+
Nikita Moghe, Siddhartha Arora, Suman Banerjee, and Mitesh M. Khapra. 2018. Towards exploiting background knowledge for building conversation systems. In EMNLP, pages 2322-2332. Association for Computational Linguistics.
|
| 287 |
+
Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In ACL, pages 311-318. ACL.
|
| 288 |
+
Lianhui Qin, Michel Galley, Chris Brockett, Xiaodong Liu, Xiang Gao, Bill Dolan, Yejin Choi, and Jianfeng Gao. 2019. Conversing by reading: Contentful neural conversation with on-demand machine reading. In ACL (1), pages 5427-5436. Association for Computational Linguistics.
|
| 289 |
+
Pengjie Ren, Zhumin Chen, Christof Monz, Jun Ma, and Maarten de Rijke. 2019. Thinking globally, acting locally: Distantly supervised global-to-local knowledge selection for background based conversation. CoRR, abs/1908.09528.
|
| 290 |
+
Chinnadhurai Sankar, Sandeep Subramanian, Chris Pal, Sarath Chandar, and Yoshua Bengio. 2019. Do neural dialog systems use the conversation history effectively? an empirical study. In Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL 2019, Florence, Italy, July 28- August 2, 2019, Volume 1: Long Papers, pages 32-37.
|
| 291 |
+
Min Joon Seo, Aniruddha Kembhavi, Ali Farhadi, and Hannaneh Hajishirzi. 2017. Bidirectional attention flow for machine comprehension. In *ICLR (Poster)*. OpenReview.net.
|
| 292 |
+
Iulian Vlad Serban, Alessandro Sordoni, Yoshua Bengio, Aaron C. Courville, and Joelle Pineau. 2016.
|
| 293 |
+
|
| 294 |
+
Building end-to-end dialogue systems using generative hierarchical neural network models. In Proceedings of the Thirtieth AAAI Conference on Artificial Intelligence, February 12-17, 2016, Phoenix, Arizona, USA, pages 3776-3784.
|
| 295 |
+
Iulian Vlad Serban, Alessandro Sordoni, Ryan Lowe, Laurent Charlin, Joelle Pineau, Aaron C. Courville, and Yoshua Bengio. 2017. A hierarchical latent variable encoder-decoder model for generating dialogues. In Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence, February 4-9, 2017, San Francisco, California, USA, pages 3295-3301.
|
| 296 |
+
Ilya Sutskever, Oriol Vinyals, and Quoc V. Le. 2014. Sequence to sequence learning with neural networks. In NIPS, pages 3104-3112.
|
| 297 |
+
Zhiliang Tian, Wei Bi, Dongkyu Lee, Lanqing Xue, Yiping Song, Xiaojiang Liu, and Nevin L. Zhang. 2020. Response-anticipated memory for on-demand knowledge integration in response generation. CoRR, abs/2005.06128.
|
| 298 |
+
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In NIPS, pages 5998-6008.
|
| 299 |
+
Yingce Xia, Fei Tian, Lijun Wu, Jianxin Lin, Tao Qin, Nenghai Yu, and Tie-Yan Liu. 2017. Deliberation networks: Sequence generation beyond one-pass decoding. In NIPS, pages 1784-1794.
|
| 300 |
+
Hao-Tong Ye, Kai-Ling Lo, Shang-Yu Su, and Yun-Nung Chen. 2019. Knowledge-grounded response generation with deep attentional latent-variable model. CoRR, abs/1903.09813.
|
| 301 |
+
Tom Young, Erik Cambria, Iti Chaturvedi, Hao Zhou, Subham Biswas, and Minlie Huang. 2018. Augmenting end-to-end dialogue systems with commonsense knowledge. In AAAI, pages 4970-4977. AAAI Press.
|
| 302 |
+
Yangjun Zhang, Pengjie Ren, and Maarten de Rijke. 2019. Improving background based conversation with context-aware knowledge pre-selection. CoRR, abs/1906.06685.
|
| 303 |
+
Tiancheng Zhao, Ran Zhao, and Maxine Eskenazi. 2017. Learning discourse-level diversity for neural dialog models using conditional variational autoencoders. In ACL (1), pages 654-664. Association for Computational Linguistics.
|
| 304 |
+
Xueliang Zhao, Chongyang Tao, Wei Wu, Can Xu, Dongyan Zhao, and Rui Yan. 2019. A document-grounded matching network for response selection in retrieval-based chatbots. In *IJCAI*, pages 5443-5449. ijcai.org.
|
| 305 |
+
Wen Zheng and Ke Zhou. 2019. Enhancing conversational dialogue models with grounded knowledge. In CIKM, pages 709-718. ACM.
|
| 306 |
+
|
| 307 |
+
Hao Zhou, Tom Young, Minlie Huang, Haizhou Zhao, Jingfang Xu, and Xiaoyan Zhu. 2018a. Commonsense knowledge aware conversation generation with graph attention. In *IJCAI*, pages 4623-4629. ijcai.org.
|
| 308 |
+
Kangyan Zhou, Shrimai Prabhumoye, and Alan W. Black. 2018b. A dataset for document grounded conversations. In EMNLP, pages 708-713. Association for Computational Linguistics.
|
acompareaggregatetransformerforunderstandingdocumentgroundeddialogue/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:24bfc3e39a69d20efee17703f2cc3f997a09838eb0b0db554fb66eedd9c7aea9
|
| 3 |
+
size 469255
|
acompareaggregatetransformerforunderstandingdocumentgroundeddialogue/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6d83ef1be3df0fe6875c1db5d62334f1de91d98c87209a879b65435c2c4ca51e
|
| 3 |
+
size 376133
|
aconcisemodelformulticriteriachinesewordsegmentationwithtransformerencoder/6484072a-ed0b-4aaa-ae63-8c2e14ddc8fc_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:56ba7825325ff18ed5cc667444ea304e543144017ddf6746e0d4a7da5e258918
|
| 3 |
+
size 80437
|
aconcisemodelformulticriteriachinesewordsegmentationwithtransformerencoder/6484072a-ed0b-4aaa-ae63-8c2e14ddc8fc_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:10da4fda7af9ad0c6c9ccaad9fb47bfa6660deb698f0593cc7b8af26b1b1abd6
|
| 3 |
+
size 96138
|
aconcisemodelformulticriteriachinesewordsegmentationwithtransformerencoder/6484072a-ed0b-4aaa-ae63-8c2e14ddc8fc_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2e0686c6f4d5044135c6e566851daa1b7d5c7e0938590481a6becc4c84d2e21c
|
| 3 |
+
size 690471
|
aconcisemodelformulticriteriachinesewordsegmentationwithtransformerencoder/full.md
ADDED
|
@@ -0,0 +1,397 @@
|
| 1 |
+
# A Concise Model for Multi-Criteria Chinese Word Segmentation with Transformer Encoder
|
| 2 |
+
|
| 3 |
+
Xipeng Qiu*, Hengzhi Pei, Hang Yan, Xuanjing Huang
|
| 4 |
+
|
| 5 |
+
Shanghai Key Laboratory of Intelligent Information Processing, Fudan University
|
| 6 |
+
|
| 7 |
+
School of Computer Science, Fudan University
|
| 8 |
+
|
| 9 |
+
{xpqiu, hvpei16, hyan19, xjhuang}@fudan.edu.cn
|
| 10 |
+
|
| 11 |
+
# Abstract
|
| 12 |
+
|
| 13 |
+
Multi-criteria Chinese word segmentation (MCCWS) aims to exploit the relations among the multiple heterogeneous segmentation criteria and further improve the performance of each single criterion. Previous work usually regards MCCWS as different tasks, which are learned together under the multi-task learning framework. In this paper, we propose a concise but effective unified model for MCCWS, which is fully-shared for all the criteria. By leveraging the powerful ability of the Transformer encoder, the proposed unified model can segment Chinese text according to a unique criterion-token indicating the output criterion. Besides, the proposed unified model can segment both simplified and traditional Chinese and has an excellent transfer capability. Experiments on eight datasets with different criteria show that our model outperforms our single-criterion baseline model and other multi-criteria models. Source codes of this paper are available on Github<sup>1</sup>.
|
| 14 |
+
|
| 15 |
+
# 1 Introduction
|
| 16 |
+
|
| 17 |
+
Chinese word segmentation (CWS) is a preliminary step to process Chinese text. The mainstream CWS methods regard CWS as a character-based sequence labeling problem, in which each character is assigned a label to indicate its boundary information. Recently, various neural models have been explored to reduce the effort of feature engineering (Chen et al., 2015a,b; Qun et al., 2020; Wang and Xu, 2017; Kurita et al., 2017; Ma et al., 2018).
|
| 18 |
+
|
| 19 |
+
Recently, Chen et al. (2017) proposed multi-criteria Chinese word segmentation (MCCWS) to effectively utilize the heterogeneous resources with different segmentation criteria. Specifically, they regard each segmentation criterion as a single
|
| 20 |
+
|
| 21 |
+
<table><tr><td>Corpora</td><td>Lin</td><td>Dan</td><td>won</td><td colspan="2">the championship</td></tr><tr><td>CTB</td><td colspan="2">林丹</td><td>赢得</td><td colspan="2">总冠军</td></tr><tr><td>PKU</td><td>林</td><td>丹</td><td>赢得</td><td>总</td><td>冠军</td></tr><tr><td>MSRA</td><td colspan="2">林丹</td><td>赢得</td><td>总</td><td>冠军</td></tr></table>
|
| 22 |
+
|
| 23 |
+
Table 1: Illustration of different segmentation criteria.
|
| 24 |
+
|
| 25 |
+
task under the framework of multi-task learning, where a shared layer is used to extract the criteria-invariant features, and a private layer is used to extract the criteria-specific features.
|
| 26 |
+
|
| 27 |
+
However, it is unnecessary to use a specific private layer for each criterion. These different criteria often have partial overlaps. For the example in Table 1, the segmentation of "林丹(Lin Dan)" is the same in the CTB and MSRA criteria, and the segmentation of "总|冠军(the championship)" is the same in the PKU and MSRA criteria. All three criteria have the same segmentation for the word "赢得(won)". Although these criteria are inconsistent, they share some partial segmentations. Therefore, it is interesting to use a unified model for all the criteria. At the inference phase, a criterion-token is taken as input to indicate the target segmentation criterion. Following this idea, Gong et al. (2018) used multiple LSTMs and a criterion switcher at every position to automatically switch the routing among these LSTMs. He et al. (2019) used a shared BiLSTM to deal with all the criteria by adding two artificial tokens at the beginning and end of an input sentence to specify the target criterion. However, due to the long-range dependency problem, it is hard for a BiLSTM to carry the criterion information to each character in a long sentence.
|
| 28 |
+
|
| 29 |
+
In this work, we propose a concise unified model for MCCWS task by integrating shared knowledge from multiple segmentation criteria. Inspired by the success of the Transformer (Vaswani et al., 2017), we design a fully shared architecture for MCCWS, where a shared Transformer encoder is
|
| 30 |
+
|
| 31 |
+

|
| 32 |
+
(a) CTB
|
| 33 |
+
|
| 34 |
+

|
| 35 |
+
(b) PKU
|
| 36 |
+
Figure 1: Unified model for MCCWS. $\left[\cdot \right]$ is a special token indicating the output criterion. The label $\{B,M,E,S\}$ of each character indicates whether it is the beginning, middle, or end of a word, or a single-character word.
|
| 37 |
+
|
| 38 |
+
used to extract the criterion-aware contextual features, and a shared decoder is used to predict the criterion-specific labels. An artificial token is added at the beginning of the input sentence to determine the output criterion. A similar idea has been used in machine translation, where Johnson et al. (2017) used a single model to translate between multiple languages. Figure 1 illustrates our model. There are two reasons to use the Transformer encoder for MCCWS. The primary reason is its neatness and ingenious simplicity in modeling the criterion-aware context representation for each character. Since the Transformer encoder uses the self-attention mechanism to capture the interaction between every two tokens in a sentence, each character can immediately perceive the information of the criterion-token as well as the context information. The secondary reason is that the Transformer encoder has potential advantages in capturing long-range context information and offers better parallel efficiency than the popular LSTM-based encoders. Finally, we evaluate on eight segmentation criteria over five simplified Chinese and three traditional Chinese corpora. Experiments show that the proposed model is effective in improving the performance of MCCWS.
|
| 39 |
+
|
| 40 |
+
The contributions of this paper could be summarized as follows.
|
| 41 |
+
|
| 42 |
+
- We propose a concise unified model for MCCWS based on the Transformer encoder, which adopts a single fully-shared model to segment sentences according to a given target criterion. It is attractive in practice to use a single model to produce multiple outputs with different criteria.
|
| 43 |
+
|
| 44 |
+
- By a thorough investigation, we show the feasibility of using a unified CWS model to segment both simplified and traditional Chinese (see Sec. 4.3). We think it is a promising direction for CWS to exploit the collective knowledge of these two kinds of Chinese.
|
| 45 |
+
- The learned criterion embeddings reflect the relations between different criteria, which gives our model better transfer capability: it can be adapted to a new criterion (see Sec. 4.4) just by learning a new criterion embedding in the latent semantic space.
|
| 46 |
+
- To our knowledge, this is a first attempt to train the Transformer encoder from scratch for the CWS task. Although in this paper we mainly address its conciseness and suitability for MCCWS and do not intend to optimize a specific Transformer encoder for single-criterion CWS (SCCWS), we show that the Transformer encoder is also effective for SCCWS. Its potential advantages are that it can effectively extract the long-range interactions among characters and parallelizes better than LSTM-based encoders.
|
| 47 |
+
|
| 48 |
+
# 2 Background
|
| 49 |
+
|
| 50 |
+
In this section, we briefly describe the background of our work.
|
| 51 |
+
|
| 52 |
+
# 2.1 Neural Architecture for CWS
|
| 53 |
+
|
| 54 |
+
Usually, the CWS task is viewed as a character-based sequence labeling problem. Specifically, each character in a sentence $X = \{x_{1},\dots,x_{T}\}$ is labelled as one of $y \in \mathcal{L} = \{B,M,E,S\}$, indicating the beginning, middle, or end of a word, or a single-character word. The aim of the CWS task is to find the true label sequence $Y^{*} = \{y_{1}^{*},\ldots,y_{T}^{*}\}$:
|
| 55 |
+
|
| 56 |
+
$$
|
| 57 |
+
Y^{*} = \underset{Y \in \mathcal{L}^{T}}{\arg\max}\; p(Y \mid X). \tag{1}
|
| 58 |
+
$$
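As a concrete illustration (our addition, not from the paper), the following minimal sketch derives the $\{B,M,E,S\}$ labels from a gold segmentation; the function name is an assumption.

```python
# A minimal sketch (not from the paper): map a gold segmentation to
# character-level {B, M, E, S} labels.
def words_to_bmes(words):
    labels = []
    for w in words:
        if len(w) == 1:
            labels.append("S")                    # single-character word
        else:
            labels.append("B")                    # word-initial character
            labels.extend("M" * (len(w) - 2))     # word-internal characters
            labels.append("E")                    # word-final character
    return labels

# "林丹 / 赢得 / 总冠军" under the CTB criterion of Table 1:
print(words_to_bmes(["林丹", "赢得", "总冠军"]))  # ['B', 'E', 'B', 'E', 'B', 'M', 'E']
```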
|
| 59 |
+
|
| 60 |
+
Recently, various neural models have been widely used in CWS and can effectively reduce the efforts of feature engineering. The modern architecture of neural CWS usually consists of three components:
|
| 61 |
+
|
| 62 |
+
**Embedding Layer:** In neural models, the first step is to map discrete language symbols into distributed embedding space. Formally, each character $x_{t}$ is mapped as $\mathbf{e}_{x_t} \in \mathbb{R}^{d_e}$ , where $d_{e}$ is a hyper-parameter indicating the size of character embedding.
|
| 63 |
+
|
| 64 |
+

|
| 65 |
+
(a) SCCWS
|
| 66 |
+
|
| 67 |
+

|
| 68 |
+
(b) MTL-based MCCWS
|
| 69 |
+
Figure 2: Architectures of SCCWS and MCCWS. The shaded components are shared for different criteria.
|
| 70 |
+
|
| 71 |
+

|
| 72 |
+
(c) Unified MCCWS
|
| 73 |
+
|
| 74 |
+
**Encoding Layer:** The encoding layer extracts the contextual features for each character.
|
| 75 |
+
|
| 76 |
+
For example, a prevalent choice for the encoding layer is the bi-directional LSTM (BiLSTM) (Hochreiter and Schmidhuber, 1997), which can incorporate information from both sides of the sequence.
|
| 77 |
+
|
| 78 |
+
$$
|
| 79 |
+
\mathbf{h}_{t} = \operatorname{BiLSTM}\left(\mathbf{e}_{x_{t}}, \overrightarrow{\mathbf{h}}_{t-1}, \overleftarrow{\mathbf{h}}_{t+1}, \theta_{e}\right), \tag{2}
|
| 80 |
+
$$
|
| 81 |
+
|
| 82 |
+
where $\overrightarrow{\mathbf{h}}_t$ and $\overleftarrow{\mathbf{h}}_t$ are the hidden states at step $t$ of the forward and backward LSTMs respectively, and $\theta_e$ denotes all the parameters in the BiLSTM layer.
|
| 83 |
+
|
| 84 |
+
Besides BiLSTM, CNNs are also used to extract features.
|
| 85 |
+
|
| 86 |
+
**Decoding Layer:** The extracted features are then sent to a conditional random field (CRF) layer (Lafferty et al., 2001) or a multi-layer perceptron (MLP) for tag inference.
|
| 87 |
+
|
| 88 |
+
When using CRF as decoding layer, $p(Y|X)$ in Eq (1) could be formalized as:
|
| 89 |
+
|
| 90 |
+
$$
|
| 91 |
+
p(Y \mid X) = \frac{\Psi(Y \mid X)}{\sum_{Y^{\prime} \in \mathcal{L}^{n}} \Psi(Y^{\prime} \mid X)}, \tag{3}
|
| 92 |
+
$$
|
| 93 |
+
|
| 94 |
+
where $\Psi(Y|X)$ is the potential function. In a first-order linear-chain CRF, we have:
|
| 95 |
+
|
| 96 |
+
$$
|
| 97 |
+
\Psi(Y \mid X) = \prod_{t=2}^{n} \psi(X, t, y_{t-1}, y_{t}), \tag{4}
|
| 98 |
+
$$
|
| 99 |
+
|
| 100 |
+
$$
|
| 101 |
+
\psi(X, t, y^{\prime}, y) = \exp(\delta(X, t)_{y} + \mathbf{b}_{y^{\prime} y}), \tag{5}
|
| 102 |
+
$$
|
| 103 |
+
|
| 104 |
+
where $\mathbf{b}_{y'y} \in \mathbb{R}$ is a trainable parameter for the label pair $(y', y)$, and the score function $\delta(X, t) \in \mathbb{R}^{|\mathcal{L}|}$ computes the score of each label for tagging the $t$-th character:
|
| 105 |
+
|
| 106 |
+
$$
|
| 107 |
+
\delta(X, t) = \mathbf{W}_{\delta}^{\top} \mathbf{h}_{t} + \mathbf{b}_{\delta}, \tag{6}
|
| 108 |
+
$$
|
| 109 |
+
|
| 110 |
+
where $\mathbf{h}_t$ is the hidden state of the encoder at step $t$, and $\mathbf{W}_{\delta} \in \mathbb{R}^{d_h \times |\mathcal{L}|}$ and $\mathbf{b}_{\delta} \in \mathbb{R}^{|\mathcal{L}|}$ are trainable parameters.
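For concreteness, here is a minimal sketch (our addition; tensor names are assumptions) of the unnormalized log-score $\log \Psi(Y|X)$ from Eqs (4)-(6); the denominator of Eq (3) is computed with the forward algorithm in practice.

```python
import torch

# A minimal sketch of log Psi(Y|X) from Eqs (4)-(6); h is the encoder
# output of shape (T, d_h), trans[y', y] is b_{y'y}, y is a label path.
def crf_path_score(h, y, W_delta, b_delta, trans):
    emit = h @ W_delta + b_delta              # delta(X, t) of Eq (6): (T, |L|)
    score = torch.zeros(())
    for t in range(1, h.size(0)):             # t = 2 .. n in the paper's notation
        score = score + emit[t, y[t]] + trans[y[t - 1], y[t]]
    return score                              # normalize via the forward algorithm
```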
|
| 111 |
+
|
| 112 |
+
When using an MLP as the decoding layer, $p(Y|X)$ in Eq (1) is directly predicted by an MLP with a softmax output layer.
|
| 113 |
+
|
| 114 |
+
$$
|
| 115 |
+
p(y_{t} \mid X) = \operatorname{MLP}(\mathbf{h}_{t}, \theta_{d}), \quad \forall t \in [1, T] \tag{7}
|
| 116 |
+
$$
|
| 117 |
+
|
| 118 |
+
where $\theta_{d}$ denotes all the parameters in the MLP layer.
|
| 119 |
+
|
| 120 |
+
Most current state-of-the-art CWS models (Chen et al., 2015a; Xu and Sun, 2016; Liu et al., 2016; Yang et al., 2018; Qun et al., 2020) mainly focus on single-criterion CWS (SCCWS). Figure 2a shows the architecture of SCCWS.
|
| 121 |
+
|
| 122 |
+
# 2.2 MCCWS with Multi-Task Learning
|
| 123 |
+
|
| 124 |
+
To improve the performance of CWS by exploiting multiple corpora with heterogeneous criteria, Chen et al. (2017) utilize the multi-task learning framework to model the shared information among these different criteria.
|
| 125 |
+
|
| 126 |
+
Formally, assuming that there are $M$ corpora with heterogeneous segmentation criteria, we refer to $\mathcal{D}_m$ as corpus $m$ with $N_{m}$ samples:
|
| 127 |
+
|
| 128 |
+
$$
|
| 129 |
+
\mathcal{D}_{m} = \left\{\left(X_{n}^{(m)}, Y_{n}^{(m)}\right)\right\}_{n=1}^{N_{m}}, \tag{8}
|
| 130 |
+
$$
|
| 131 |
+
|
| 132 |
+
where $X_{n}^{(m)}$ and $Y_{n}^{(m)}$ denote the $n$ -th sentence and the corresponding label in corpus $m$ respectively.
|
| 133 |
+
|
| 134 |
+
The encoding layer introduces a shared encoder to mine the common knowledge across multiple corpora, together with the original private encoder. The architecture of MTL-based MCCWS is shown in Figure 2b.
|
| 135 |
+
|
| 136 |
+
Concretely, for corpus $m$ , a shared encoder and a private encoder are first used to extract the criterion-agnostic and criterion-specific features.
|
| 137 |
+
|
| 138 |
+
$$
|
| 139 |
+
\mathbf{H}^{(s)} = \mathbf{enc}_{s}\left(\mathbf{e}_{X}; \theta_{e}^{(s)}\right), \tag{9}
|
| 140 |
+
$$
|
| 141 |
+
|
| 142 |
+
$$
|
| 143 |
+
\mathbf{H}^{(m)} = \mathbf{enc}_{m}\left(\mathbf{e}_{X}; \theta_{e}^{(m)}\right), \quad \forall m \in [1, M] \tag{10}
|
| 144 |
+
$$
|
| 145 |
+
|
| 146 |
+
where $\mathbf{e}_X = \{\mathbf{e}_{x_1},\dots ,\mathbf{e}_{x_T}\}$ denotes the embeddings of the input characters $x_{1},\dots ,x_{T}$ , $\mathbf{enc}_s(\cdot)$ represents the shared encoder and $\mathbf{enc}_m(\cdot)$ represents the private encoder for corpus $m$ ; $\theta_e^{(s)}$ and
|
| 147 |
+
|
| 148 |
+
$\theta_{e}^{(m)}$ are the shared and private parameters respectively. The shared and private encoders are usually implemented by the RNN or CNN network.
|
| 149 |
+
|
| 150 |
+
Then a private decoder is used to predict criterion-specific labels. For the $m$ -th corpus, the probability of output labels is
|
| 151 |
+
|
| 152 |
+
$$
|
| 153 |
+
p_{m}(Y \mid X) = \mathbf{dec}_{m}\left([\mathbf{H}^{(s)}; \mathbf{H}^{(m)}]; \theta_{d}^{(m)}\right), \tag{11}
|
| 154 |
+
$$
|
| 155 |
+
|
| 156 |
+
where $\mathbf{dec}_m(\cdot)$ is a private CRF or MLP decoder for corpus $m(m\in [1,M])$ , taking the shared and private features as inputs; $\theta_d^{(m)}$ is the parameters of the $m$ -th private decoder.
|
| 157 |
+
|
| 158 |
+
**Objective:** The objective is to maximize the log-likelihood of the true labels on all the corpora:
|
| 159 |
+
|
| 160 |
+
$$
|
| 161 |
+
\mathcal{J}_{\text{seg}}\left(\Theta^{m}, \Theta^{s}\right) = \sum_{m=1}^{M} \sum_{n=1}^{N_{m}} \log p_{m}\left(Y_{n}^{(m)} \mid X_{n}^{(m)}; \Theta^{m}, \Theta^{s}\right), \tag{12}
|
| 162 |
+
$$
|
| 163 |
+
|
| 164 |
+
where $\Theta^m = \{\theta_e^{(m)},\theta_d^{(m)}\}$ and $\Theta^s = \{\mathbf{E},\theta_e^{(s)}\}$ denote all the private and shared parameters respectively; $\mathbf{E}$ is the embedding matrix.
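A minimal sketch of one training epoch under the objective of Eq (12) might look as follows (our addition; `model.log_prob` and `corpus.batches` are assumed helpers, not the paper's code):

```python
import random

# A minimal sketch: maximize Eq (12) by interleaving batches from the
# M corpora; model.log_prob and corpus.batches are assumed helpers.
def mtl_epoch(corpora, model, optimizer):
    batches = [(m, batch) for m, corpus in enumerate(corpora)
               for batch in corpus.batches()]
    random.shuffle(batches)                         # mix criteria within the epoch
    for m, (X, Y) in batches:
        loss = -model.log_prob(X, Y, criterion=m)   # -log p_m(Y | X)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
```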
|
| 165 |
+
|
| 166 |
+
# 3 Proposed Unified Model
|
| 167 |
+
|
| 168 |
+
In this work, we propose a more concise architecture for MCCWS, which adopts the Transformer encoder (Vaswani et al., 2017) to extract the contextual features for each input character. In our proposed architecture, both the encoder and the decoder are shared by all the criteria. The only difference for each criterion is that a unique token is taken as input to specify the target criterion, which makes the shared encoder capture the criterion-aware representation. Figure 2 illustrates the difference between our proposed model and the previous models. A more detailed architecture for MCCWS is shown in Figure 3.
|
| 169 |
+
|
| 170 |
+
# 3.1 Embedding Layer
|
| 171 |
+
|
| 172 |
+
Given a sentence $X = \{x_{1},\ldots ,x_{T}\}$ , we first map it into a vector sequence where each token is a $d_{model}$ dimensional vector. Besides the standard character embedding, we introduce three extra embeddings: criterion embedding, bigram embedding, and position embedding.
|
| 173 |
+
|
| 174 |
+
1) Criterion Embedding: Firstly, we add a unique criterion-token at the beginning of $X$ to indicate the output criterion. For the $m$ -th criterion, the criterion-token is $[m]$ . We use $\mathbf{e}_{[m]}$ to denote its embedding. Thus, the model can learn the relations
|
| 175 |
+
|
| 176 |
+

|
| 177 |
+
Figure 3: Proposed Model for MCCWS.
|
| 178 |
+
|
| 179 |
+
between different criteria in the latent embedding space.
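As a hedged sketch of this step (token spellings are our assumption), prepending the criterion-token is a one-line preprocessing operation:

```python
# A minimal sketch (token spellings are assumptions): prepend the
# criterion-token [m] so it occupies position 0 of the input.
CRITERIA = ["MSRA", "AS", "PKU", "CTB", "CKIP", "CITYU", "NCC", "SXU"]

def add_criterion_token(chars, criterion):
    assert criterion in CRITERIA
    return [f"[{criterion}]"] + list(chars)

print(add_criterion_token("林丹赢得总冠军", "PKU"))
# ['[PKU]', '林', '丹', '赢', '得', '总', '冠', '军']
```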
|
| 180 |
+
|
| 181 |
+
2) Bigram Embedding: As shown in (Chen et al., 2015b; Shao et al., 2017; Zhang et al., 2018), character-level bigram features can significantly benefit the CWS task. Following their settings, we also introduce the bigram embedding to augment the character-level unigram embedding. The representation of character $x_{t}$ is
|
| 182 |
+
|
| 183 |
+
$$
|
| 184 |
+
\mathbf{e}_{x_{t}}^{\prime} = \operatorname{FC}\left(\mathbf{e}_{x_{t}} \oplus \mathbf{e}_{x_{t-1} x_{t}} \oplus \mathbf{e}_{x_{t} x_{t+1}}\right), \tag{13}
|
| 185 |
+
$$
|
| 186 |
+
|
| 187 |
+
where $\mathbf{e}$ denotes the $d$-dimensional embedding vector of a unigram or bigram, $\oplus$ is the concatenation operator, and FC is a fully connected layer that maps the concatenated embedding of dimension $3d$ into the embedding $\mathbf{e}_{x_t}^{\prime} \in \mathbb{R}^{d_{model}}$.
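A minimal PyTorch sketch of Eq (13) (our addition; class and argument names are assumptions):

```python
import torch
import torch.nn as nn

# A minimal sketch of Eq (13): concatenate the unigram and the two adjacent
# bigram embeddings, then project to d_model with a fully connected layer.
class UniBigramEmbedding(nn.Module):
    def __init__(self, n_uni, n_bi, d, d_model):
        super().__init__()
        self.uni = nn.Embedding(n_uni, d)
        self.bi = nn.Embedding(n_bi, d)
        self.fc = nn.Linear(3 * d, d_model)          # FC in Eq (13)

    def forward(self, uni_ids, left_bi_ids, right_bi_ids):
        cat = torch.cat([self.uni(uni_ids),          # e_{x_t}
                         self.bi(left_bi_ids),       # e_{x_{t-1} x_t}
                         self.bi(right_bi_ids)],     # e_{x_t x_{t+1}}
                        dim=-1)
        return self.fc(cat)                          # e'_{x_t}
```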
|
| 188 |
+
|
| 189 |
+
3) Position Embedding: To capture the order information of a sequence, a position embedding $PE$ is used for each position. The position embedding can be learned or predefined. In this work, we use the predefined position embedding following (Vaswani et al., 2017). For the $t$-th character in a sentence, its position embedding is defined by
|
| 190 |
+
|
| 191 |
+
$$
|
| 192 |
+
PE_{t, 2i} = \sin\left(t / 10000^{2i / d_{\text{model}}}\right), \tag{14}
|
| 193 |
+
$$
|
| 194 |
+
|
| 195 |
+
$$
|
| 196 |
+
PE_{t, 2i+1} = \cos\left(t / 10000^{2i / d_{\text{model}}}\right), \tag{15}
|
| 197 |
+
$$
|
| 198 |
+
|
| 199 |
+
where $i$ denotes the dimensional index of position embedding.
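Eqs (14)-(15) can be computed once and cached; a minimal sketch (our addition, assuming an even $d_{model}$):

```python
import math
import torch

# A minimal sketch of Eqs (14)-(15): fixed sinusoidal position embeddings.
# Assumes an even d_model.
def sinusoidal_pe(max_len, d_model):
    pe = torch.zeros(max_len, d_model)
    pos = torch.arange(max_len, dtype=torch.float).unsqueeze(1)   # t
    div = torch.exp(torch.arange(0, d_model, 2, dtype=torch.float)
                    * (-math.log(10000.0) / d_model))             # 10000^{-2i/d_model}
    pe[:, 0::2] = torch.sin(pos * div)                            # PE_{t, 2i}
    pe[:, 1::2] = torch.cos(pos * div)                            # PE_{t, 2i+1}
    return pe
```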
|
| 200 |
+
|
| 201 |
+
Finally, the embedding matrix of the sequence $X = \{x_{1},\dots ,x_{T}\}$ with criterion $m$ is formulated as
|
| 202 |
+
|
| 203 |
+
$$
|
| 204 |
+
\mathbf{H} = \left[\mathbf{e}_{[m]} + PE_{0};\ \mathbf{e}_{x_{1}}^{\prime} + PE_{1};\ \dots;\ \mathbf{e}_{x_{T}}^{\prime} + PE_{T}\right], \tag{16}
|
| 205 |
+
$$
|
| 206 |
+
|
| 207 |
+
where $\mathbf{H} \in \mathbb{R}^{(T + 1) \times d_{model}}$ , $(T + 1)$ and $d_{model}$ represent the length and the dimension of the input vector sequence.
|
| 208 |
+
|
| 209 |
+
# 3.2 Encoding Layer
|
| 210 |
+
|
| 211 |
+
In sequence modeling, RNN and CNN often suffer from the long-term dependency problem and cannot effectively extract the non-local interactions in a sentence. Recently, the fully-connected self-attention architecture, such as Transformer (Vaswani et al., 2017), achieves great success in many NLP tasks.
|
| 212 |
+
|
| 213 |
+
In this work, we adopt the Transformer encoder as our encoding layer, in which several multi-head self-attention layers are used to extract the contextual feature for each character.
|
| 214 |
+
|
| 215 |
+
Given a sequence of vectors $H \in \mathbb{R}^{(T + 1) \times d_{\text{model}}}$ , a single-head self-attention projects $H$ into three different matrices: the query matrix $Q \in \mathbb{R}^{(T + 1) \times d_k}$ , the key matrix $K \in \mathbb{R}^{(T + 1) \times d_k}$ and the value matrix $V \in \mathbb{R}^{(T + 1) \times d_v}$ , and uses scaled dot-product attention to get the output representation.
|
| 216 |
+
|
| 217 |
+
$$
|
| 218 |
+
Q, K, V = HW^{Q}, HW^{K}, HW^{V} \tag{17}
|
| 219 |
+
$$
|
| 220 |
+
|
| 221 |
+
$$
|
| 222 |
+
\operatorname{Attn}(Q, K, V) = \operatorname{softmax}\left(\frac{Q K^{T}}{\sqrt{d_{k}}}\right) V, \tag{18}
|
| 223 |
+
$$
|
| 224 |
+
|
| 225 |
+
where the matrices $W^{Q} \in \mathbb{R}^{d_{model} \times d_{k}}$, $W^{K} \in \mathbb{R}^{d_{model} \times d_{k}}$, and $W^{V} \in \mathbb{R}^{d_{model} \times d_{v}}$ are learnable parameters and $\operatorname{softmax}(\cdot)$ is performed row-wise.
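A minimal sketch of Eqs (17)-(18) for a single head (our addition; the function name is an assumption):

```python
import torch
import torch.nn.functional as F

# A minimal sketch of Eqs (17)-(18): single-head scaled dot-product
# self-attention over H of shape (T + 1, d_model).
def self_attention(H, W_q, W_k, W_v):
    Q, K, V = H @ W_q, H @ W_k, H @ W_v                    # Eq (17)
    scores = Q @ K.transpose(-2, -1) / Q.size(-1) ** 0.5   # QK^T / sqrt(d_k)
    return F.softmax(scores, dim=-1) @ V                   # Eq (18), row-wise softmax
```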
|
| 226 |
+
|
| 227 |
+
The Transformer encoder consists of several stacked multi-head self-attention layers and fully-connected layers. Assuming the input of the multi-head self-attention layer is $H$ , its output $\tilde{H}$ is calculated by
|
| 228 |
+
|
| 229 |
+
$$
|
| 230 |
+
Z = \text{layer-norm}\left(H + \operatorname{MultiHead}(H)\right), \tag{19}
|
| 231 |
+
$$
|
| 232 |
+
|
| 233 |
+
$$
|
| 234 |
+
\tilde{H} = \text{layer-norm}\left(Z + \operatorname{FFN}(Z)\right), \tag{20}
|
| 235 |
+
$$
|
| 236 |
+
|
| 237 |
+
where $\text{layer-norm}(\cdot)$ represents layer normalization (Ba et al., 2016).
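Putting Eqs (19)-(20) together, one encoder layer can be sketched as follows (our addition; PyTorch's `nn.MultiheadAttention` stands in for the paper's multi-head layer):

```python
import torch.nn as nn

# A minimal sketch of Eqs (19)-(20): one post-norm Transformer encoder layer.
class EncoderLayer(nn.Module):
    def __init__(self, d_model, n_heads, d_ff):
        super().__init__()
        self.mha = nn.MultiheadAttention(d_model, n_heads, batch_first=True)
        self.ffn = nn.Sequential(nn.Linear(d_model, d_ff), nn.ReLU(),
                                 nn.Linear(d_ff, d_model))
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)

    def forward(self, H):                            # H: (batch, T + 1, d_model)
        Z = self.norm1(H + self.mha(H, H, H)[0])     # Eq (19)
        return self.norm2(Z + self.ffn(Z))           # Eq (20)
```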
|
| 238 |
+
|
| 239 |
+
All the tasks with the different criteria use the same encoder. Nevertheless, with the different criterion-token $[m]$ , the encoder can effectively extract the criterion-aware representation for each character.
|
| 240 |
+
|
| 241 |
+
# 3.3 Decoding Layer
|
| 242 |
+
|
| 243 |
+
In the standard multi-task learning framework, each task has its private decoder to predict the task-specific labels. Different from the previous work, we use a shared decoder for all the tasks since we have extracted the criterion-aware representation for each character. In this work, we use CRF as the
|
| 244 |
+
|
| 245 |
+
decoder since it is slightly better than MLP (see Sec. 4.2).
|
| 246 |
+
|
| 247 |
+
With the fully-shared encoder and decoder, our model is more concise than the shared-private architectures (Chen et al., 2017; Huang et al., 2019).
|
| 248 |
+
|
| 249 |
+
# 4 Experiments
|
| 250 |
+
|
| 251 |
+
**Datasets:** We use eight CWS datasets from SIGHAN2005 (Emerson, 2005) and SIGHAN2008 (Jin and Chen, 2008). Among them, the AS, CITYU, and CKIP datasets are in traditional Chinese, while the MSRA, PKU, CTB, NCC, and SXU datasets are in simplified Chinese. Except where otherwise stated, we follow the setting of (Chen et al., 2017; Gong et al., 2018) and translate the AS, CITYU and CKIP datasets into simplified Chinese. We do not balance the datasets, and we randomly pick $10\%$ of the examples from the training set as the development set for all datasets. Similar to the previous work (Chen et al., 2017), we preprocess all the datasets by replacing continuous Latin characters and digits with a unique token, and by converting all digits, punctuation and Latin letters to half-width to deal with the full/half-width mismatch between the training and test sets.
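A minimal sketch of this preprocessing (our addition; the replacement tokens `<eng>` and `<num>` are assumptions, not the paper's exact symbols):

```python
import re
import unicodedata

# A minimal sketch of the preprocessing above; <eng>/<num> are assumed tokens.
def preprocess(sent):
    sent = unicodedata.normalize("NFKC", sent)    # full-width -> half-width
    sent = re.sub(r"[A-Za-z]+", "<eng>", sent)    # continuous Latin characters
    sent = re.sub(r"[0-9]+", "<num>", sent)       # continuous digits
    return sent

print(preprocess("ＮＢＡ总决赛２０１９"))           # -> '<eng>总决赛<num>'
```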
|
| 252 |
+
|
| 253 |
+
We have checked the annotation schemes of the different datasets: they only partially overlap, and no two datasets have the same scheme. According to our statistics, the average overlap is about $20.5\%$ for 3-grams and $4.4\%$ for 5-grams.
|
| 254 |
+
|
| 255 |
+
Table 2 gives the details of the eight datasets after preprocessing. For the training and development sets, lines are split into shorter sentences or clauses at punctuation, in order to form batches faster.
|
| 256 |
+
|
| 257 |
+
**Pre-trained Embeddings:** As shown in (Chen et al., 2015b; Shao et al., 2017; Zhang et al., 2018), n-gram features are of great benefit to Chinese word segmentation and POS tagging. Thus we use unigram and bigram embeddings in our models. We first pre-train unigram and bigram embeddings on the Chinese Wikipedia corpus by the method proposed in (Ling et al., 2015), which improves standard word2vec by incorporating token order information.
|
| 258 |
+
|
| 259 |
+
**Hyper-parameters:** We use the Adam optimizer (Kingma and Ba, 2014) with the same warmup strategy as (Vaswani et al., 2017). The development set is used for parameter tuning. All the models are trained for 100 epochs. Pre-trained embeddings are fixed for the first 80 epochs and then updated during the following epochs. After
|
| 260 |
+
|
| 261 |
+
Table 2: Details of the eight datasets after preprocessing. "Word Types" is the number of unique words. "Char Types" is the number of unique characters. "OOV Rate" is the Out-Of-Vocabulary rate.
|
| 262 |
+
|
| 263 |
+
<table><tr><td></td><td colspan="2">Corpora</td><td>Words#</td><td>Chars#</td><td>Word Types</td><td>Char Types</td><td>OOV</td></tr><tr><td rowspan="8">Sighan05</td><td rowspan="2">MSRA</td><td>Train</td><td>2.4M</td><td>4.0M</td><td>75.4K</td><td>5.1K</td><td rowspan="2">1.32%</td></tr><tr><td>Test</td><td>0.1M</td><td>0.2M</td><td>11.9K</td><td>2.8K</td></tr><tr><td rowspan="2">AS</td><td>Train</td><td>5.4M</td><td>8.3M</td><td>128.8K</td><td>5.8K</td><td rowspan="2">2.20%</td></tr><tr><td>Test</td><td>0.1M</td><td>0.2M</td><td>18.0K</td><td>3.4K</td></tr><tr><td rowspan="2">PKU</td><td>Train</td><td>1.1M</td><td>1.8M</td><td>51.2K</td><td>4.6K</td><td rowspan="2">2.06%</td></tr><tr><td>Test</td><td>0.1M</td><td>0.2M</td><td>12.5K</td><td>2.9K</td></tr><tr><td rowspan="2">CITYU</td><td>Train</td><td>1.1M</td><td>1.8M</td><td>43.4K</td><td>4.2K</td><td rowspan="2">3.69%</td></tr><tr><td>Test</td><td>0.2M</td><td>0.4M</td><td>23.2K</td><td>3.6K</td></tr><tr><td rowspan="8">Sighan08</td><td rowspan="2">CTB</td><td>Train</td><td>0.6M</td><td>1.0M</td><td>40.5K</td><td>4.2K</td><td rowspan="2">3.80%</td></tr><tr><td>Test</td><td>0.1M</td><td>0.1M</td><td>11.9K</td><td>2.9K</td></tr><tr><td rowspan="2">CKIP</td><td>Train</td><td>0.7M</td><td>1.1M</td><td>44.7K</td><td>4.5K</td><td rowspan="2">4.29%</td></tr><tr><td>Test</td><td>0.1M</td><td>0.1M</td><td>14.2K</td><td>3.1K</td></tr><tr><td rowspan="2">NCC</td><td>Train</td><td>0.9M</td><td>1.4M</td><td>53.3K</td><td>5.3K</td><td rowspan="2">3.31%</td></tr><tr><td>Test</td><td>0.2M</td><td>0.2M</td><td>20.9K</td><td>3.9K</td></tr><tr><td rowspan="2">SXU</td><td>Train</td><td>0.5M</td><td>0.8M</td><td>29.8K</td><td>4.1K</td><td rowspan="2">2.60%</td></tr><tr><td>Test</td><td>0.1M</td><td>0.2M</td><td>11.6K</td><td>2.8K</td></tr></table>
|
| 264 |
+
|
| 265 |
+
<table><tr><td>Embedding Size d</td><td>100</td></tr><tr><td>Hidden State Size dmodel</td><td>256</td></tr><tr><td>Transformer Encoder Layers</td><td>6</td></tr><tr><td>Attention Heads</td><td>4</td></tr><tr><td>Batch Size</td><td>256</td></tr><tr><td>Dropout Ratio</td><td>0.2</td></tr><tr><td>Warmup Steps</td><td>4000</td></tr></table>
|
| 266 |
+
|
| 267 |
+
Table 3: Hyper-Parameter Settings
|
| 268 |
+
|
| 269 |
+
each training epoch, we test the model on the dev set, and the model with the highest $F1$ on the dev set is used on the test set. Table 3 shows the detailed hyper-parameters.
|
| 270 |
+
|
| 271 |
+
# 4.1 Overall Results
|
| 272 |
+
|
| 273 |
+
Table 4 shows the experiment results of the proposed model on test sets of eight CWS datasets.
|
| 274 |
+
|
| 275 |
+
We first compare our Transformer encoder with the previous models in the single-criterion scenario. The comparison is presented in the upper block of Table 4. Since Switch-LSTMs (Gong et al., 2018) is designed for MCCWS, it is only slightly better than BiLSTM in the single-criterion scenario. The Transformer encoder brings a noticeable improvement over the LSTM-based encoders of (Chen et al., 2017; Gong et al., 2018), and gives performance comparable to (Ma et al., 2018). In this work, we do not intend to prove the superiority of the Transformer encoder over LSTM-based encoders in the single-criterion scenario. Our purpose is to build a concise unified model based on the Transformer encoder for MCCWS.
|
| 276 |
+
|
| 277 |
+
In the multi-criteria scenario, we compare our unified model with the BiLSTM (Chen et al., 2017) and Switch-LSTMs (Gong et al., 2018); the lower block of Table 4 displays the comparison. Firstly, although different criteria are trained together, our unified model achieves better performance on all datasets except CTB. Compared to the single-criterion scenario, the multi-criteria scenario gains 0.42 in average $F1$ score. Moreover, our unified model brings a significant improvement of 5.05 in OOV recall. Secondly, compared to previous MCCWS models, our unified model also achieves a better average $F1$ score. In particular, it significantly outperforms the unified BiLSTM (He et al., 2019), which indicates that the Transformer encoder is more effective than BiLSTM in carrying the criterion information. The reason is that the Transformer encoder can model the interaction of the criterion-token and each character directly, while BiLSTM needs to carry the criterion information step-by-step from the two ends to the middle of the input sentence, so the criterion information can be lost in long sentences.
|
| 278 |
+
|
| 279 |
+
About 200 sentences are shared by more than one dataset with different segmentation schemes, but they are not much harder to segment correctly: their $F1$ score is 96.84.
|
| 280 |
+
|
| 281 |
+
Figure 4 visualizes the 2D PCA projection of the learned embeddings of eight different criteria. Generally, the eight criteria are mapped into dispersed points in the embedding space, which indicates
|
| 282 |
+
|
| 283 |
+
<table><tr><td>Models</td><td></td><td>MSRA</td><td>AS</td><td>PKU</td><td>CTB</td><td>CKIP</td><td>CITYU</td><td>NCC</td><td>SXU</td><td>Avg.</td></tr><tr><td colspan="11">Single-Criterion Models</td></tr><tr><td>Stacked BiLSTM (Ma et al., 2018)</td><td>F</td><td>97.4</td><td>96.2</td><td>96.1</td><td>96.7</td><td>-</td><td>97.2</td><td>-</td><td>-</td><td>-</td></tr><tr><td>BiLSTM (Chen et al., 2017)</td><td>F</td><td>95.84</td><td>94.2</td><td>93.3</td><td>95.3</td><td>93.06</td><td>94.07</td><td>92.17</td><td>95.17</td><td>94.14</td></tr><tr><td>Switch-LSTMs (Gong et al., 2018)</td><td>F</td><td>96.46</td><td>94.51</td><td>95.74</td><td>97.09</td><td>92.88</td><td>93.71</td><td>92.12</td><td>95.57</td><td>94.76</td></tr><tr><td>Transformer Encoder</td><td>F</td><td>98.07</td><td>96.06</td><td>96.39</td><td>96.41</td><td>95.66</td><td>96.32</td><td>95.57</td><td>97.08</td><td>96.45</td></tr><tr><td>Transformer Encoder</td><td>OOV</td><td>73.75</td><td>73.05</td><td>72.82</td><td>82.82</td><td>79.05</td><td>83.72</td><td>71.81</td><td>77.95</td><td>76.87</td></tr><tr><td colspan="11">Multi-Criteria Models</td></tr><tr><td>BiLSTM (Chen et al., 2017)</td><td>F</td><td>96.04</td><td>94.64</td><td>94.32</td><td>96.18</td><td>94.26</td><td>95.55</td><td>92.83</td><td>96.04</td><td>94.98</td></tr><tr><td>Switch-LSTMs (Gong et al., 2018)</td><td>F</td><td>97.78</td><td>95.22</td><td>96.15</td><td>97.26</td><td>94.99</td><td>96.22</td><td>94.12</td><td>97.25</td><td>96.12</td></tr><tr><td>Unified BiLSTM (He et al., 2019)</td><td>F</td><td>97.2</td><td>95.4</td><td>96.0</td><td>96.7</td><td>-</td><td>96.1</td><td>-</td><td>96.4</td><td>-</td></tr><tr><td>Our Unified Model</td><td>F</td><td>98.05</td><td>96.44</td><td>96.41</td><td>96.99</td><td>96.51</td><td>96.91</td><td>96.04</td><td>97.61</td><td>96.87</td></tr><tr><td>Our Unified Model</td><td>OOV</td><td>78.92</td><td>76.39</td><td>78.91</td><td>87</td><td>82.89</td><td>86.91</td><td>79.3</td><td>85.08</td><td>81.92</td></tr></table>
|
| 284 |
+
|
| 285 |
+
Table 4: Overall results on eight CWS datasets. F and OOV indicate the $F1$ score and OOV recall, respectively. The upper block consists of single-criterion models. Since Stacked BiLSTM (Ma et al., 2018) is a strong SOTA model, the other comparable CWS models are omitted for brevity. The lower block consists of multi-criteria models.
|
| 286 |
+
|
| 287 |
+
<table><tr><td>Models</td><td>MSRA</td><td>AS</td><td>PKU</td><td>CTB</td><td>CKIP</td><td>CITYU</td><td>NCC</td><td>SXU</td><td>Avg.</td></tr><tr><td>Unified Model</td><td>98.05</td><td>96.44</td><td>96.41</td><td>96.99</td><td>96.51</td><td>96.91</td><td>96.04</td><td>97.61</td><td>96.87</td></tr><tr><td>w/o CRF</td><td>98.02</td><td>96.42</td><td>96.41</td><td>96.9</td><td>96.59</td><td>96.87</td><td>95.96</td><td>97.5</td><td>96.83</td></tr><tr><td>w/o bigram</td><td>97.41</td><td>96</td><td>96.25</td><td>96.71</td><td>96</td><td>96.31</td><td>94.62</td><td>96.84</td><td>96.27</td></tr><tr><td>w/o pre-trained emb.</td><td>97.51</td><td>96.06</td><td>96.02</td><td>96.47</td><td>96.22</td><td>95.99</td><td>94.82</td><td>96.76</td><td>96.23</td></tr></table>
|
| 288 |
+
|
| 289 |
+
Table 5: Ablation experiments.
|
| 290 |
+
|
| 291 |
+
that each criterion differs from the others. Among them, MSRA stands out most clearly. A possible reason is that named entities are regarded as whole words in the MSRA criterion, which clearly distinguishes it from the other criteria.
|
| 292 |
+
|
| 293 |
+

|
| 294 |
+
Figure 4: Visualization of the criterion embeddings.
|
| 295 |
+
|
| 296 |
+
# 4.2 Ablation Study
|
| 297 |
+
|
| 298 |
+
Table 5 shows the effectiveness of each component in our model.
|
| 299 |
+
|
| 300 |
+
The first ablation study verifies the effectiveness of the CRF decoder, which is popular in most CWS models. The comparison between the first two lines indicates that using CRF makes little difference. Since a model with CRF takes longer to train and run inference, we suggest not using CRF in Transformer encoder models in practice.
|
| 301 |
+
|
| 302 |
+
The other two ablation studies evaluate the effect of the bigram feature and pre-trained embeddings. Their effects vary across datasets: some datasets are more sensitive to the bigram feature, while others are more sensitive to pre-trained embeddings. In terms of average performance, both components are important and boost the performance considerably, but neither is a clear winner.
|
| 303 |
+
|
| 304 |
+
# 4.3 Joint Training on Both Simplified and Traditional Corpora
|
| 305 |
+
|
| 306 |
+
In the above experiments, the traditional Chinese corpora (AS, CITYU, and CKIP) are translated into simplified Chinese. However, it might be more attractive to jointly train a unified model directly on the mixed corpora of simplified and traditional Chinese without translation. As a reference, a single model has been used to translate between multiple languages in machine translation (Johnson et al., 2017).
|
| 307 |
+
|
| 308 |
+
To thoroughly investigate the feasibility of this idea, we study four different settings to train our model on simplified and traditional Chinese corpora.
|
| 309 |
+
|
| 310 |
+
1. The first setting ("8Simp") is to translate all
|
| 311 |
+
|
| 312 |
+
<table><tr><td>Models</td><td>MSRA</td><td>AS</td><td>PKU</td><td>CTB</td><td>CKIP</td><td>CITYU</td><td>NCC</td><td>SXU</td><td>Avg. F1</td></tr><tr><td>8Simp</td><td>98.05</td><td>96.44</td><td>96.41</td><td>96.99</td><td>96.51</td><td>96.91</td><td>96.04</td><td>97.61</td><td>96.87</td></tr><tr><td>8Trad</td><td>97.98</td><td>96.39</td><td>96.49</td><td>96.99</td><td>96.49</td><td>96.86</td><td>95.98</td><td>97.48</td><td>96.83</td></tr><tr><td>5Simp, 3Trad</td><td>98.03</td><td>96.52</td><td>96.6</td><td>96.94</td><td>96.38</td><td>96.8</td><td>96.02</td><td>97.55</td><td>96.86</td></tr><tr><td>8Simp, 8Trad</td><td>98.04</td><td>96.41</td><td>96.43</td><td>96.99</td><td>96.54</td><td>96.85</td><td>96.08</td><td>97.52</td><td>96.86</td></tr></table>
|
| 313 |
+
|
| 314 |
+
Table 6: Joint training on both the simplified and traditional Chinese corpus.
|
| 315 |
+
|
| 316 |
+
<table><tr><td>苹果(apple)</td><td>蘋果(apple)</td><td>爱好(hobby)</td><td>爱好(hobby)</td><td>担心(worry)</td><td>擔心(worry)</td></tr><tr><td>坚果(nut)</td><td>微軟(Microsoft)</td><td>热爱(love)</td><td>熱愛(love)</td><td>关心(care)</td><td>關心(care)</td></tr><tr><td>谷歌(Google)</td><td>黃油(butter)</td><td>兴趣(interest)</td><td>爱好(hobby)</td><td>怀疑(doubt)</td><td>顧慮(misgiving)</td></tr><tr><td>华为(Huawei)</td><td>現貨(goods in stock)</td><td>爱好(hobby)</td><td>興趣(interest)</td><td>顾虑(misgiving)</td><td>懷疑(doubt)</td></tr><tr><td>黃油(butter)</td><td>果凍(jelly)</td><td>梦想(dream)</td><td>夢想(dream)</td><td>担忧(concern)</td><td>擔憂(concern)</td></tr><tr><td>鮮果(fresh fruit)</td><td>京東(JD)</td><td>愛玩(playful)</td><td>愛玩(playful)</td><td>责怪(blame)</td><td>憂慮(anxiety)</td></tr><tr><td>微软(Microsoft)</td><td>賣家(seller)</td><td>痴迷(addict)</td><td>喜愛(adore)</td><td>伤心(sad)</td><td>責怪(blame)</td></tr><tr><td>诺基(Nokia)</td><td>苹果(apple)</td><td>乐趣(pleasure)</td><td>習慣(habit)</td><td>嫌弃(disfavour)</td><td>傷心(sad)</td></tr><tr><td>蘋果(Apple)</td><td>售後(after-sales)</td><td>喜爱(adore)</td><td>樂趣(pleasure)</td><td>忧虑(anxiety)</td><td>担心(worry)</td></tr></table>
|
| 317 |
+
|
| 318 |
+
Table 7: Qualitative analysis of the joint embedding space of simplified and traditional Chinese. Given a target bigram (header row), we list its top 8 most similar bigrams. Bigrams in traditional Chinese were marked in red in the original paper.
|
| 319 |
+
|
| 320 |
+
the corpora into simplified Chinese. For the pre-trained embeddings, we use the simplified Chinese Wikipedia dump to pre-train the unigram and bigram embeddings. This is the same setting as in the previous experiments.
|
| 321 |
+
|
| 322 |
+
2. The second setting ("8Trad") is to translate all the corpora into traditional Chinese. For the pre-trained embeddings, we first convert the Wikipedia dump into traditional Chinese characters, then we use this converted corpus to pre-train unigram and bigram embeddings.
|
| 323 |
+
3. The third setting ("5Simp, 3Trad") is to keep the original characters of the five simplified Chinese corpora and the three traditional Chinese corpora without translation. The unified model can then take simplified or traditional Chinese sentences as input. For this setting, we pre-train simplified and traditional Chinese embeddings in a joint embedding space: we merge the Wikipedia corpora used in "8Trad" and "8Simp" to form a mixed corpus containing both simplified and traditional Chinese characters, and pre-train the unigram and bigram embeddings on this mixed corpus.
|
| 324 |
+
4. The last setting ("8Simp, 8Trad") is to simultaneously train our model on both the eight simplified Chinese corpora in "8Simp" and the eight traditional Chinese corpora in "8Trad". The pre-trained word embeddings are the same as "5Simp, 3Trad".
|
| 325 |
+
|
| 326 |
+
Table 6 shows that there is little
|
| 327 |
+
|
| 328 |
+
difference among the settings. This investigation indicates that it is feasible to train a unified model directly on the two kinds of Chinese characters.
|
| 329 |
+
|
| 330 |
+
To better understand the quality of the learned joint embedding space of the simplified and traditional Chinese, we conduct a qualitative analysis to illustrate the most similar bigrams for a target bigram. Similar bigrams are retrieved based on the cosine similarity calculated using the learned embeddings. As shown in Table 7, the traditional Chinese bigrams are similar to their simplified Chinese counterparts, and vice versa. The results show that the simplified and traditional Chinese bigrams are aligned well in the joint embedding space.
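A minimal sketch of this retrieval step (our addition; variable names are assumptions):

```python
import numpy as np

# A minimal sketch: retrieve the top-k bigrams by cosine similarity in the
# joint embedding space, as used to build Table 7.
def top_k_similar(query, vocab, emb, k=8):
    E = emb / np.linalg.norm(emb, axis=1, keepdims=True)   # unit-normalize rows
    sims = E @ E[vocab.index(query)]                       # cosine similarities
    order = np.argsort(-sims)
    return [vocab[i] for i in order if vocab[i] != query][:k]
```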
|
| 331 |
+
|
| 332 |
+
# 4.4 Transfer Capability
|
| 333 |
+
|
| 334 |
+
Since all parts of the unified model except the criterion embedding are shared across criteria, we explore whether a trained unified model can be transferred to a new criterion by learning only a new criterion embedding from a few examples.
|
| 335 |
+
|
| 336 |
+
We use the leave-one-out strategy to evaluate the transfer capability of our unified model. We first train a model on seven datasets, then learn only the new criterion embedding with a few training instances from the left-out dataset. This scenario is also discussed in (Gong et al., 2018), and Figure 5 presents their results and ours (averaged $F1$ score). There are two observations. Firstly, across different numbers of samples, the transferred model always largely outperforms the models learned
|
| 337 |
+
|
| 338 |
+

|
| 339 |
+
Figure 5: Evaluation of the transfer capability. Switch-LSTMs and Ours are models trained on the given instances from scratch. Switch-LSTMs $(trans)$ and Ours $(trans)$ are models learned in transfer fashion.
|
| 340 |
+
|
| 341 |
+
from scratch. We believe this indicates that learning a new criterion embedding is an effective way to transfer a trained unified model to a new criterion. Secondly, our model also shows better transferability than Switch-LSTMs (Ours $(trans)$ versus Switch-LSTMs $(trans)$).
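A minimal PyTorch sketch of this transfer recipe (our addition; the attribute name `criterion_emb` is an assumption):

```python
import torch

# A minimal sketch: transfer by freezing everything except the criterion
# embedding, and masking gradients so only the new criterion's row moves.
def prepare_transfer(model, new_criterion_id):
    for p in model.parameters():
        p.requires_grad_(False)            # freeze the trained model
    emb = model.criterion_emb.weight       # assumed attribute name
    emb.requires_grad_(True)

    def keep_new_row(grad):                # zero all rows but the new one
        mask = torch.zeros_like(grad)
        mask[new_criterion_id] = 1.0
        return grad * mask

    emb.register_hook(keep_new_row)
```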
|
| 342 |
+
|
| 343 |
+
# 5 Related Work
|
| 344 |
+
|
| 345 |
+
Previous work on MCCWS can be categorized into two lines.
|
| 346 |
+
|
| 347 |
+
One line is multi-task based MCCWS. Chen et al. (2017) proposed a multi-criteria learning framework for CWS, which uses a shared layer to extract the common underlying features and a private layer for each criterion to extract criterion-specific features. Huang et al. (2019) proposed a domain-adaptive segmenter to capture diverse criteria based on Bidirectional Encoder Representations from Transformers (BERT) (Devlin et al., 2018).
|
| 348 |
+
|
| 349 |
+
Another line is unified MCCWS. Gong et al. (2018) presented Switch-LSTMs to segment sentences, which consist of several LSTM layers and use a criterion switcher at every position to change the routing among these LSTMs automatically. However, the complexity of the model makes Switch-LSTMs hard to apply in practice. He et al. (2019) used a shared BiLSTM by adding two artificial tokens at the beginning and end of an input sentence to specify the output criterion. However, due to the long-range dependency problem, it is hard for a BiLSTM to carry the criterion information to each character in a long sentence.
|
| 350 |
+
|
| 351 |
+
Compared to the above two unified models, we use the Transformer encoder in our unified model,
|
| 352 |
+
|
| 353 |
+
which can elegantly model the criterion-aware context representation for each character. With the Transformer, we just need a special criterion-token to specify the output criterion: each character can directly attend to the criterion-token to be aware of the target criterion. Thus, we can use a single model to produce different segmentation results for different criteria. Different from (Huang et al., 2019), which uses the pre-trained Transformer BERT and several extra projection layers for different criteria, our model is fully shared and more concise.
|
| 354 |
+
|
| 355 |
+
# 6 Conclusion and Future Work
|
| 356 |
+
|
| 357 |
+
We propose a concise unified model for MCCWS, which uses the Transformer encoder to extract the criterion-aware representation according to a unique criterion-token. Experiments on eight corpora show that our proposed model outperforms the previous models and has a stronger transfer capability. The conciseness of our model makes it easy to apply in practice.
|
| 358 |
+
|
| 359 |
+
In this work, we only adopt the vanilla Transformer encoder, since we just want to utilize its self-attention mechanism to model the criterion-aware context representation for each character neatly. Therefore, it is promising for future work to look for a more effective adapted Transformer encoder for the CWS task or to utilize pre-trained models (Qiu et al., 2020), such as BERT-based MCCWS (Ke et al., 2020). Besides, we also plan to incorporate other sequence labeling tasks into the unified model, such as POS tagging and named entity recognition.
|
| 360 |
+
|
| 361 |
+
# Acknowledgements
|
| 362 |
+
|
| 363 |
+
This work was supported by the National Natural Science Foundation of China (No. 62022027 and 61976056), Science and Technology on Parallel and Distributed Processing Laboratory (PDL).
|
| 364 |
+
|
| 365 |
+
# References
|
| 366 |
+
|
| 367 |
+
Lei Jimmy Ba, Ryan Kiros, and Geoffrey E. Hinton. 2016. Layer normalization. CoRR, abs/1607.06450.
|
| 368 |
+
Xinchi Chen, Xipeng Qiu, Chenxi Zhu, and Xuanjing Huang. 2015a. Gated recursive neural network for Chinese word segmentation. In Proceedings of Annual Meeting of the Association for Computational Linguistics.
|
| 369 |
+
Xinchi Chen, Xipeng Qiu, Chenxi Zhu, Pengfei Liu, and Xuanjing Huang. 2015b. Long Short-Term
|
| 370 |
+
|
| 371 |
+
Memory Neural Networks for Chinese Word Segmentation. In EMNLP, pages 1197-1206.
|
| 372 |
+
Xinchi Chen, Zhan Shi, Xipeng Qiu, and Xuanjing Huang. 2017. Adversarial multi-criteria learning for Chinese word segmentation. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), volume 1, pages 1193-1203.
|
| 373 |
+
Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. BERT: pre-training of deep bidirectional transformers for language understanding. CoRR, abs/1810.04805.
|
| 374 |
+
T. Emerson. 2005. The second international Chinese word segmentation bakeoff. In Proceedings of the Fourth SIGHAN Workshop on Chinese Language Processing, pages 123-133. Jeju Island, Korea.
|
| 375 |
+
Jingjing Gong, Xinchi Chen, Tao Gui, and Xipeng Qiu. 2018. Switch-LSTMs for multi-criteria Chinese word segmentation. arXiv preprint arXiv:1812.08033.
|
| 376 |
+
Han He, Lei Wu, Hua Yan, Zhimin Gao, Yi Feng, and George Townsend. 2019. Effective neural solution for multi-criteria word segmentation. In Smart Intelligent Computing and Applications, pages 133-142. Springer.
|
| 377 |
+
Sepp Hochreiter and Jürgen Schmidhuber. 1997. Long short-term memory. Neural computation, 9(8):1735-1780.
|
| 378 |
+
Weipeng Huang, Xingyi Cheng, Kunlong Chen, Taifeng Wang, and Wei Chu. 2019. Toward fast and accurate neural Chinese word segmentation with multi-criteria learning. arXiv preprint arXiv:1903.04190.
|
| 379 |
+
G. Jin and X. Chen. 2008. The fourth international Chinese language processing bakeoff: Chinese word segmentation, named entity recognition and Chinese POS tagging. In Sixth SIGHAN Workshop on Chinese Language Processing, page 69.
|
| 380 |
+
Melvin Johnson, Mike Schuster, Quoc V Le, Maxim Krikun, Yonghui Wu, Zhifeng Chen, Nikhil Thorat, Fernanda Viégas, Martin Wattenberg, Greg Corrado, et al. 2017. Google's multilingual neural machine translation system: Enabling zero-shot translation. Transactions of the Association for Computational Linguistics, 5:339-351.
|
| 381 |
+
Zhen Ke, Liang Shi, Erli Meng, Bin Wang, Xipeng Qiu, and Xuanjing Huang. 2020. Unified multi-criteria Chinese word segmentation with BERT. arXiv preprint arXiv:2004.05808.
|
| 382 |
+
Diederik Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.
|
| 383 |
+
|
| 384 |
+
Shuhei Kurita, Daisuke Kawahara, and Sadao Kurohashi. 2017. Neural joint model for transition-based Chinese syntactic analysis. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics, volume 1, pages 1204-1214.
|
| 385 |
+
John D. Lafferty, Andrew McCallum, and Fernando C. N. Pereira. 2001. Conditional random fields: Probabilistic models for segmenting and labeling sequence data. In Proceedings of the Eighteenth International Conference on Machine Learning.
|
| 386 |
+
Wang Ling, Chris Dyer, Alan W Black, and Isabel Trancoso. 2015. Two/too simple adaptations of word2vec for syntax problems. In Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 1299-1304.
|
| 387 |
+
Yijia Liu, Wanxiang Che, Jiang Guo, Bing Qin, and Ting Liu. 2016. Exploring segment representations for neural segmentation models. arXiv preprint arXiv:1604.05499.
|
| 388 |
+
Ji Ma, Kuzman Ganchev, and David Weiss. 2018. State-of-the-art Chinese word segmentation with BiLSTMs. arXiv preprint arXiv:1808.06511.
|
| 389 |
+
Xipeng Qiu, Tianxiang Sun, Yige Xu, Yunfan Shao, Ning Dai, and Xuanjing Huang. 2020. Pre-trained models for natural language processing: A survey. SCIENCE CHINA Technological Sciences.
|
| 390 |
+
Nuo Qun, Hang Yan, Xipeng Qiu, and Xuanjing Huang. 2020. Chinese word segmentation via BiLSTM+Semi-CRF with relay node. Journal of Computer Science and Technology, 35(5):1115-1126.
|
| 391 |
+
Yan Shao, Christian Hardmeier, Jörg Tiedemann, and Joakim Nivre. 2017. Character-based joint segmentation and POS tagging for Chinese using bidirectional RNN-CRF. arXiv preprint arXiv:1704.01314.
|
| 392 |
+
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Processing Systems, pages 5998-6008.
|
| 393 |
+
Chunqi Wang and Bo Xu. 2017. Convolutional neural network with word embeddings for Chinese word segmentation. arXiv preprint arXiv:1711.04411.
|
| 394 |
+
Jingjing Xu and Xu Sun. 2016. Dependency-based gated recursive neural network for Chinese word segmentation. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 567-572.
|
| 395 |
+
Jie Yang, Yue Zhang, and Shuailong Liang. 2018. Subword encoding in lattice LSTM for Chinese word segmentation. arXiv preprint arXiv:1810.12594.
|
| 396 |
+
|
| 397 |
+
Meishan Zhang, Nan Yu, and Guohong Fu. 2018. A simple and effective neural model for joint word segmentation and POS tagging. IEEE/ACM Transactions on Audio, Speech and Language Processing (TASLP), 26(9):1528-1538.
|
aconcisemodelformulticriteriachinesewordsegmentationwithtransformerencoder/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:93800a298a78b043dcfe77d766894558d94056c250655e6d5a5ca34f2e77d885
|
| 3 |
+
size 547906
|
aconcisemodelformulticriteriachinesewordsegmentationwithtransformerencoder/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:48a1a48f38ad336bc0716b62bfc97892edbdd5c608d30d8962dd74c3fa54e5f4
|
| 3 |
+
size 395808
|
activelearningapproachestoenhancingneuralmachinetranslation/80b307df-133a-4edf-b209-48832d1b757e_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a7fdb1c1200a9ee59ba9cabbc0ff0db130278764c8581549586e06c235811ca7
|
| 3 |
+
size 77426
|
activelearningapproachestoenhancingneuralmachinetranslation/80b307df-133a-4edf-b209-48832d1b757e_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:90eb7a7668413ef2d57d5df1231cf5af6ab60863b5312851d28f82fe560542c9
|
| 3 |
+
size 96427
|
activelearningapproachestoenhancingneuralmachinetranslation/80b307df-133a-4edf-b209-48832d1b757e_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:76c611dbd86758850f5a242df4ad56b00dff8439342e8ae089f32a9714d23057
|
| 3 |
+
size 638656
|
activelearningapproachestoenhancingneuralmachinetranslation/full.md
ADDED
|
@@ -0,0 +1,377 @@
|
| 1 |
+
# Active Learning Approaches to Enhancing Neural Machine Translation
|
| 2 |
+
|
| 3 |
+
Yuekai Zhao $^{1}$ Haoran Zhang $^{1}$ Shuchang Zhou $^{2}$ Zhihua Zhang $^{3}$
|
| 4 |
+
|
| 5 |
+
<sup>1</sup> Academy for Advanced Interdisciplinary Studies, Peking University
|
| 6 |
+
|
| 7 |
+
$^{2}$ Megvii Inc.
|
| 8 |
+
|
| 9 |
+
$^{3}$ School of Mathematical Sciences, Peking University
|
| 10 |
+
|
| 11 |
+
{yuekaizhao, haoran_zhang}@pku.edu.cn
|
| 12 |
+
|
| 13 |
+
zsc@megvii.com
|
| 14 |
+
|
| 15 |
+
zhzhang@math.pku.edu.cn
|
| 16 |
+
|
| 17 |
+
# Abstract
|
| 18 |
+
|
| 19 |
+
Active learning is an efficient approach for mitigating data dependency when training neural machine translation (NMT) models. In this paper we explore new training frameworks by incorporating active learning into various techniques such as transfer learning and iterative back-translation (IBT) under a limited human translation budget. We design a word frequency based acquisition function and combine it with a strong uncertainty based method. The combined method steadily outperforms all other acquisition functions in various scenarios. As far as we know, we are the first to do a large-scale study on actively training Transformer (Vaswani et al., 2017) for NMT. Specifically, with a human translation budget of only $20\%$ of the original parallel corpus, we manage to surpass Transformer trained on the entire parallel corpus in three language pairs.
|
| 20 |
+
|
| 21 |
+
# 1 Introduction
|
| 22 |
+
|
| 23 |
+
Impressive progress has been made in neural machine translation (NMT) in the past few years (Luong et al., 2015; Gehring et al., 2017; Vaswani et al., 2017; Wu et al., 2019). However, the general training procedure requires a tremendous amount of high-quality parallel data to achieve a deep model's full potential. Scarcity of training data is a common problem for many language pairs and can lead to poor NMT performance.
|
| 24 |
+
|
| 25 |
+
However, constructing a parallel corpus is a slow and laborious process that requires professional human translators and well-trained proofreaders. Although several dual learning (He et al., 2016; Bi et al., 2019) and unsupervised learning (Artetxe et al., 2018; Lample et al., 2017; Lample and Conneau, 2019) approaches have been applied successfully, they are often inferior to supervised models. In such cases, active learning might be a good choice. The goal of active learning in NMT is to train a well-performing model under a limited human translation budget. We achieve this goal by using specially designed acquisition functions to select informative sentences to construct a training corpus.
|
| 28 |
+
|
| 29 |
+
Acquisition functions can be categorized into two types: model related and model agnostic. For the former, the methods we use are all based on the idea of uncertainty. For the latter, we devise a word frequency based method that takes linguistic features into consideration. Both types of acquisition functions prove beneficial in active NMT training, especially when they are appropriately combined.
|
| 30 |
+
|
| 31 |
+
Data augmentation techniques that consume no human translation budget are worth exploring in active NMT training. If the parallel corpus of a related language pair is available, transfer learning (Zoph et al., 2016; Kim et al., 2019) is a good choice. Otherwise, we propose a new training framework that integrates active learning with iterative back-translation (IBT) (Hoang et al., 2018). We achieve success in both settings, especially when active learning is combined with IBT.
|
| 32 |
+
|
| 33 |
+
The main contributions of this work are listed as follows: 1) To the best of our knowledge, we are the first to give a comprehensive study of active learning in NMT under various settings. 2) We propose a word frequency based acquisition function which is model agnostic and effective. This acquisition function can further enhance existing uncertainty based methods, achieving even better results in all settings. 3) We design a new training framework for active iterative back-translation as well as a simple data augmentation technique. With a human translation budget of only $20\%$ of the original parallel corpus, we can achieve better BLEU scores than the fully supervised Transformer does (Vaswani et al., 2017).
|
| 34 |
+
|
| 35 |
+

|
| 36 |
+
Figure 1: (a) shows the diagram of vanilla supervised NMT training. A parallel corpus is available and used to train the model. (b) shows active NMT training. An acquisition function can use the model to score each sentence in the source side monolingual corpus. A parallel corpus is gradually constructed by employing an oracle (human translator) to translate the sentences with high scores. (c) shows active iterative back-translation. An acquisition function can use $Model_{A \to B}$ to score the untranslated sentences in language A. One part of the high-score sentences is translated by an oracle (new parallel corpus); another part is translated by $Model_{A \to B}$ (new synthetic corpus). The new parallel corpus and the new synthetic corpus are used for training $Model_{B \to A}$, and vice versa.
|
| 37 |
+
|
| 38 |
+
# 2 Related Work
|
| 39 |
+
|
| 40 |
+
Active learning In natural language processing, active learning is well studied in text classification (Zhang et al., 2017; Ru et al., 2020) and named entity recognition (Shen et al., 2017; Siddhant and Lipton, 2018; Prabhu et al., 2019). Peris and Casacuberta (2018) applied attention based acquisition functions for NMT. Liu et al. (2018) introduced reinforcement learning to actively train an NMT model.
|
| 41 |
+
|
| 42 |
+
Data selection in NMT Although active learning has not been thoroughly studied in NMT, the related data selection problem has attracted some attention. van der Wees et al. (2017) and Wang et al. (2018a) designed weighted sampling methods that accelerate training and improve performance. Wang et al. (2018b) and Pham et al. (2018) focused on noisy data, proposing algorithms to filter out harmful sentence pairs. Wang et al. (2019) simultaneously dealt with domain data selection and clean data selection. Fadaee and Monz (2018), Poncelas et al. (2019) and Dou et al. (2020) considered domain data selection in back-translation. Wang and Neubig (2019) proposed a method to select relevant sentences from other languages to obtain performance gains in low resource NMT. Furthermore, Ruiter et al. (2019) tried to extract possible parallel data from bilingual Wikipedia.
|
| 45 |
+
|
| 46 |
+
Interactive NMT Interactive NMT exploits user feedback to help improve translation systems. Real-world (Kreutzer et al., 2018) or simulated user feedback includes highlighting accurate translation chunks (Petrushkov et al., 2018) or correcting errors made by the machine (Peris and Casacuberta, 2018; Domingo et al., 2019). Kreutzer and Riezler (2019) took the cost of different types of supervision (feedback) into account, which resembles the idea of active learning.
|
| 47 |
+
|
| 48 |
+
# 3 Methodology
|
| 49 |
+
|
| 50 |
+
We give a detailed description of active neural machine translation (NMT) in this section. Basic settings and some terminologies are introduced in Section 3.1. In Section 3.2 and Section 3.3, various acquisition functions are presented and explained. Section 3.4 deals with combining active learning with transfer learning and iterative back-translation. Figure 1 illustrates the different training frameworks in NMT.
|
| 51 |
+
|
| 52 |
+
# 3.1 Active NMT
|
| 53 |
+
|
| 54 |
+
Several terminologies need to be clarified before introducing the active NMT training loop, namely, acquisition function, oracle and budget.
|
| 55 |
+
|
| 56 |
+
Acquisition Function An acquisition function gives a score to each untranslated sentence in the monolingual corpus. Sentences with higher scores are more likely to be selected for the training corpus. Acquisition functions fall into two types: model related and model agnostic. A model related acquisition function takes a sentence as the model input and gives a score depending on the model output. A model agnostic acquisition function is typically concerned with the informativeness of the sentence itself, and can score each sentence before the model is trained.
|
| 57 |
+
|
| 58 |
+
Oracle An oracle is a gold standard for a machine learning task. For NMT, an oracle outputs the ground truth translation given a source sentence (in practice, an expert human translator). A parallel corpus is gradually constructed by employing an oracle to translate the selected sentences.
|
| 59 |
+
|
| 60 |
+
Budget Budget means the total cost one can afford for employing an oracle. For NMT, we need to hire human experts to translate sentences. To simulate active NMT training, throughout all our experiments the cost is measured as the number of words translated.
|
| 61 |
+
|
| 62 |
+
In the beginning, we have a large-scale monolingual corpus of the source language. We do several rounds of active training until the total budget is used up. In each round, five steps are taken (a minimal sketch follows the list):
|
| 63 |
+
|
| 64 |
+
- Use an acquisition function to score each untranslated sentence.
|
| 65 |
+
- Sort the untranslated sentences according to the scores in descending order.
|
| 66 |
+
- Select high-score untranslated sentences until the token budget of this round is used up.
|
| 67 |
+
- Remove the selected sentences from the monolingual corpus and employ an oracle to translate them.
|
| 68 |
+
- Add these new sentence pairs to the parallel corpus and retrain the NMT model.
|
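
To make the loop concrete, here is a minimal Python sketch of one active training round under the word-count cost model above. The `score`, `oracle`, and `retrain` callables are hypothetical placeholders for an acquisition function, a human translator, and NMT training, respectively, not the exact implementation.

```python
def active_round(monolingual, parallel, model, score, oracle, budget):
    """One round of active NMT training (a sketch, not the exact implementation)."""
    # Steps 1-2: score every untranslated sentence and sort in descending order.
    ranked = sorted(monolingual, key=lambda s: score(model, s), reverse=True)
    # Step 3: select high-score sentences until the token budget is used up.
    selected, used = [], 0
    for sent in ranked:
        cost = len(sent.split())  # cost = number of words translated
        if used + cost > budget:
            break
        selected.append(sent)
        used += cost
    # Step 4: remove them from the monolingual pool; the oracle translates them.
    remaining = [s for s in monolingual if s not in set(selected)]
    parallel = parallel + [(s, oracle(s)) for s in selected]
    # Step 5: retrain the NMT model on the enlarged parallel corpus.
    model = retrain(model, parallel)  # hypothetical training helper
    return remaining, parallel, model
```
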
| 69 |
+
|
| 70 |
+
We use Transformer throughout our experiments. As this architecture is widely used and our implementation differs little from the original, we skip an exhaustive background description of the underlying model; see Vaswani et al. (2017) for details. The active NMT training loop is shown in part (b) of Figure 1.
|
| 71 |
+
|
| 72 |
+
# 3.2 Model Related Acquisition Functions
|
| 73 |
+
|
| 74 |
+
All model related acquisition functions we try are based on uncertainty. Settles and Craven (2008) applied such methods to sequence labeling tasks. For NMT, we use greedy decoding to generate a synthetic translation of each sentence $x = (x_{1},\dots ,x_{n})$ in the monolingual corpus $U$. We denote this synthetic translation as $\hat{y} = (\hat{y}_1,\dots ,\hat{y}_m)$. In the $i^{\mathrm{th}}$ decoding step, the model outputs a probability distribution over the entire vocabulary, $P_{\theta}(\cdot \mid x,\hat{y}_{< i})$.
|
| 75 |
+
|
| 76 |
+
Least Confident (lc) A direct interpretation of model uncertainty is the average confidence level on the generated translation. We strengthen the model on its weaknesses and force it to learn more from intrinsically hard sentences.
|
| 77 |
+
|
| 78 |
+
$$
\frac{1}{m} \sum_{i=1}^{m} \left[ 1 - P_{\theta}\left(\hat{y}_{i} \mid x, \hat{y}_{<i}\right) \right] \tag{1}
$$
|
| 81 |
+
|
| 82 |
+
Minimum Margin (margin) Margin means the average probability gap between the model's most confident word $y_{i,1}^{*}$ and second most confident word $y_{i,2}^{*}$ in each decoding step. With a small margin, the model is unable to distinguish the best translation from an inferior one.
|
| 83 |
+
|
| 84 |
+
$$
-\frac{1}{m} \sum_{i=1}^{m} \left[ P_{\theta}\left(y_{i,1}^{*} \mid x, \hat{y}_{<i}\right) - P_{\theta}\left(y_{i,2}^{*} \mid x, \hat{y}_{<i}\right) \right] \tag{2}
$$
|
| 87 |
+
|
| 88 |
+
Token Entropy (te) Concentrated distributions tend to have low entropy. Entropy is also an appropriate measurement of uncertainty. In NMT, we calculate the average entropy in each decoding step as given by the following equation.
|
| 89 |
+
|
| 90 |
+
$$
\frac{1}{m} \sum_{i=1}^{m} \mathrm{entropy}\left(P_{\theta}(\cdot \mid x, \hat{y}_{<i})\right) \tag{3}
$$
|
| 93 |
+
|
| 94 |
+
Total Token Entropy (tte) To avoid favoring long sentences, we average over sentence length in the above three methods. However, it remains an open question whether querying long sentences should be discouraged. We design an acquisition function to investigate this issue by removing the $\frac{1}{m}$ term from Token Entropy.
|
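
All four scores can be computed from the per-step output distributions of greedy decoding. Below is a minimal PyTorch sketch; `step_probs` (the $m \times |V|$ matrix of distributions) and `picked` (the greedily chosen token ids) are assumed inputs, not names from our codebase.

```python
import torch

def uncertainty_scores(step_probs: torch.Tensor, picked: torch.Tensor) -> dict:
    """step_probs: (m, |V|) per-step distributions; picked: (m,) greedy token ids."""
    m = step_probs.size(0)
    chosen = step_probs[torch.arange(m), picked]        # P(y_hat_i | x, y_hat_<i)
    top2 = step_probs.topk(2, dim=-1).values            # two most confident words
    entropy = -(step_probs * step_probs.clamp_min(1e-12).log()).sum(dim=-1)
    return {
        "lc": (1.0 - chosen).mean().item(),                  # Eq. (1)
        "margin": -(top2[:, 0] - top2[:, 1]).mean().item(),  # Eq. (2)
        "te": entropy.mean().item(),                         # Eq. (3)
        "tte": entropy.sum().item(),                         # Eq. (3) without 1/m
    }
```
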
| 95 |
+
|
| 96 |
+
# 3.3 Model Agnostic Acquisition Functions
|
| 97 |
+
|
| 98 |
+
Uncertainty based acquisition functions depend purely on probability. We propose a model agnostic acquisition function that instead focuses on linguistic features. In NMT, it is important to enable the model to translate unseen future sentences.
|
| 99 |
+
|
| 100 |
+
Algorithm 1 Decay Logarithm Frequency Acquisition Function
|
| 101 |
+
Input: Selected Corpus $L$; Untranslated Corpus $U$; Token Budget $b$; Non-negative Constants $\lambda_{1},\lambda_{2}$
|
| 102 |
+
Output: New Selected Sentences $B$
|
| 103 |
+
1: $B = \emptyset ;\hat{U} = \emptyset$
|
| 104 |
+
2: for s in U do
|
| 105 |
+
3: calculate $lf(s)$ by Equation (6)
|
| 106 |
+
4: end for
|
| 107 |
+
5: for s in sort(U) by $lf$ score do
|
| 108 |
+
6: calculate $delfy(s)$ by Equation (7)
|
| 109 |
+
7: $\hat{U} = \hat{U} \cup \{s\}$
|
| 110 |
+
8: end for
|
| 111 |
+
9: for s in sort(U) by delfy score do
|
| 112 |
+
10: if $Cost(B \cup \{s\}) > b$ then
|
| 113 |
+
11: break
|
| 114 |
+
12: end if
|
| 115 |
+
13: $B = B \cup \{s\}$
|
| 116 |
+
14: end for
|
| 117 |
+
|
| 118 |
+
In other words, we wish to choose sentences that are representative of all the untranslated sentences but less similar to what has previously been selected.
|
| 119 |
+
|
| 120 |
+
In each active training round, we have a set of untranslated sentences on the source language side, denoted as $U$. The sentences that have been selected in previous active training rounds are denoted as $L$. We denote a sentence as $s = (s_1, \dots, s_K)$, which differs from Section 3.2 because we are now working at the word level instead of the subword level. First, we define the logarithm frequency of a word $w$ in $U$, namely $F(w \mid U)$.
|
| 121 |
+
|
| 122 |
+
$$
G(w \mid U) = \log\left(C(w \mid U) + 1\right) \tag{4}
$$

$$
F(w \mid U) = \frac{G(w \mid U)}{\sum_{w' \in U} G\left(w' \mid U\right)} \tag{5}
$$
|
| 129 |
+
|
| 130 |
+
where $C(w \mid \cdot)$ denotes the number of occurrences of the word $w$ in a given sentence set.
|
| 131 |
+
|
| 132 |
+
As shown in Equation (6), the representativeness of a sentence $s$ is determined by its average logarithm word frequency in $U$. A decay factor $\lambda_{1} \geq 0$ is introduced so that the selection pays more attention to words that are uncommon in the previously selected corpus $L$.
|
| 133 |
+
|
| 134 |
+
$$
lf(s) = \frac{\sum_{i=1}^{K} F\left(s_{i} \mid U\right) \times e^{-\lambda_{1} C\left(s_{i} \mid L\right)}}{K} \tag{6}
$$
|
| 137 |
+
|
| 138 |
+
Directly using $lf$ scores is problematic. The algorithm favors a small number of function words (like "a" and "the") that account for a high proportion of the entire corpus. Also, redundancy arises since sentences of similar content receive similar scores. Both drawbacks are harmful when building a well-performing translation system.
|
| 139 |
+
|
| 140 |
+
A gradual reranking is used to ease these two problems. Equation (6) is employed for the first round of sorting. $\hat{U}(s)$ is the set of all sentences with a higher $lf$ score than $s$. If $s$ has a high $lf$ score but each word $s_i$ in $s$ appears frequently in $\hat{U}(s)$, we use a decay term $e^{-\lambda_2 C(s_i \mid \hat{U}(s))}$ to reduce its score. In this way, we tend to discard repetitive sentences and filter out insignificant function words. Details can be found in Equations (7) and (8); $\lambda_{1}$ and $\lambda_{2}$ are non-negative constants.
|
| 141 |
+
|
| 142 |
+
$$
\mathrm{delfy}(s) = \frac{\sum_{i=1}^{K} F\left(s_{i} \mid U\right) \times \mathrm{Decay}\left(s_{i}\right)}{K} \tag{7}
$$

$$
\mathrm{Decay}\left(s_{i}\right) = e^{-\lambda_{1} C\left(s_{i} \mid L\right)} \times e^{-\lambda_{2} C\left(s_{i} \mid \hat{U}(s)\right)} \tag{8}
$$
|
| 149 |
+
|
| 150 |
+
We name this model agnostic acquisition function decay logarithm frequency (delfy); it is summarized in Algorithm 1.
|
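
A compact word-level sketch of Algorithm 1 in Python follows, under the assumption that sentences arrive pre-tokenized as lists of words; the two counters mirror $C(\cdot \mid L)$ and $C(\cdot \mid \hat{U}(s))$, and Eq. (5)'s denominator is taken over the vocabulary of $U$.

```python
import math
from collections import Counter

def delfy_ranking(U, L, lam1=1.0, lam2=1.0):
    """Rank untranslated sentences by delfy (Eqs. 4-8); U, L: lists of word lists."""
    c_U = Counter(w for s in U for w in s)
    c_L = Counter(w for s in L for w in s)
    G = {w: math.log(c + 1) for w, c in c_U.items()}            # Eq. (4)
    Z = sum(G.values())
    F = {w: g / Z for w, g in G.items()}                        # Eq. (5)

    def lf(s):                                                  # Eq. (6)
        return sum(F[w] * math.exp(-lam1 * c_L[w]) for w in s) / len(s)

    c_hat, scored = Counter(), []        # c_hat counts words in U_hat(s)
    for s in sorted(U, key=lf, reverse=True):                   # first sort by lf
        score = sum(F[w] * math.exp(-lam1 * c_L[w] - lam2 * c_hat[w])
                    for w in s) / len(s)                        # Eqs. (7)-(8)
        scored.append((score, s))
        c_hat.update(s)                  # s joins U_hat for lower-ranked sentences
    return sorted(scored, key=lambda t: t[0], reverse=True)
```
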
| 151 |
+
|
| 152 |
+
# 3.4 Active NMT with Data Augmentation
|
| 153 |
+
|
| 154 |
+
Directly incorporating active learning into NMT is beneficial. However, is there any technique that consumes no extra budget and further improves translation performance? The answer depends on the availability of a related parallel corpus. Transferring knowledge from a related language pair can be considered if an extra parallel corpus is available; iterative back-translation is worth trying if not.
|
| 155 |
+
|
| 156 |
+
Transfer Learning We assume that there exists a rich parallel corpus in a related translation direction, e.g., we try to build a German-English NMT system and we have access to French-English sentence pairs. The model is initialized by training on this related parallel corpus. Active NMT training is carried out as described in Section 3.1 after model initialization.
|
| 157 |
+
|
| 158 |
+
Iterative Back-Translation Iterative back-translation (IBT) (Sennrich et al., 2016a; Hoang et al., 2018) has proven helpful in boosting model performance. IBT offers a data augmentation technique that is budget free (no human translator needed) in active NMT training. However, simply using the entire monolingual corpus
|
| 159 |
+
|
| 160 |
+
Algorithm 2 The Framework for Active Iterative Back-Translation (IBT)
|
| 161 |
+
Input: Active IBT Rounds $R$; Parallel Corpus $L = \{L_A, L_B\}$; Monolingual Corpus $U_A$, $U_B$; Initialized NMT Models $M_{A\rightarrow B}$, $M_{B\rightarrow A}$; Acquisition Function $\Phi$; Token Budget $b$; Oracle $O$; Token Number in Synthetic Sentences $\alpha$
Output: $M_{A\rightarrow B}$, $M_{B\rightarrow A}$
1: for $i$ in 1 to $R$ do
2: $\vec{A_i} = \Phi(U_A, L_A, M_{A\rightarrow B}, b)$
3: $\vec{B_i} = O(\vec{A_i})$; $U_A = U_A \setminus \vec{A_i}$
4: $\vec{P_i} = \Phi(U_A, L_A, M_{A\rightarrow B}, \alpha)$
5: $\vec{Q_i} = M_{A\rightarrow B}(\vec{P_i})$
6: $L_A = L_A \cup \vec{A_i}$; $L_B = L_B \cup \vec{B_i}$
7: Train $M_{B\rightarrow A}$ on $\{(L_B \cup \vec{Q_i}), (L_A \cup \vec{P_i})\}$
8: $\vec{B_i} = \Phi(U_B, L_B, M_{B\rightarrow A}, b)$
9: $\vec{A_i} = O(\vec{B_i})$; $U_B = U_B \setminus \vec{B_i}$
10: $\vec{Q_i} = \Phi(U_B, L_B, M_{B\rightarrow A}, \alpha)$
11: $\vec{P_i} = M_{B\rightarrow A}(\vec{Q_i})$
12: $L_A = L_A \cup \vec{A_i}$; $L_B = L_B \cup \vec{B_i}$
13: Train $M_{A\rightarrow B}$ on $\{(L_A \cup \vec{P_i}), (L_B \cup \vec{Q_i})\}$
14: end for
|
| 162 |
+
|
| 163 |
+
to generate a synthetic parallel corpus hurts rather than improves model performance. We designed experiments to validate this claim; detailed results can be found in Appendix B.
|
| 164 |
+
|
| 165 |
+
Two reasons may explain these poor results. First, the quality of the synthetic corpus varies: some synthetic sentence pairs are beneficial, while others only introduce noise into the NMT model. Second, the percentage of the synthetic corpus in the entire training corpus is too high. To cope with these two problems, we propose a new Active IBT framework. Models of opposite translation directions are responsible for constructing training corpora for each other. Sentences with the highest acquisition function scores are divided into two parts: one part is translated by an oracle to enrich the parallel corpus, and the other part is used to generate a new synthetic corpus. In this way, we manage to control both the quality and the percentage of the synthetic corpus.
|
| 166 |
+
|
| 167 |
+
This framework is shown in part (c) of Figure 1, and some details can be found in Algorithm 2.
|
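
For concreteness, one direction of an Active IBT round (lines 2-7 of Algorithm 2) can be sketched as follows; `phi`, `oracle`, and `train` are hypothetical stand-ins for the acquisition function $\Phi$, the oracle $O$, and NMT training.

```python
def active_ibt_half_round(U_A, L_A, L_B, m_ab, m_ba, phi, oracle, b, alpha):
    """A -> B half of one Active IBT round (a sketch of Algorithm 2, lines 2-7)."""
    A_i = phi(U_A, L_A, m_ab, b)                 # top-scoring sentences, budget b
    B_i = [oracle(s) for s in A_i]               # oracle translations (new parallel)
    U_A = [s for s in U_A if s not in set(A_i)]  # remove from the monolingual pool
    P_i = phi(U_A, L_A, m_ab, alpha)             # next sentences, synthetic budget
    Q_i = [m_ab(s) for s in P_i]                 # model translations (new synthetic)
    L_A, L_B = L_A + A_i, L_B + B_i              # enrich the parallel corpus
    m_ba = train(m_ba, src=L_B + Q_i, tgt=L_A + P_i)  # update the reverse model
    return U_A, L_A, L_B, m_ba
```
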
| 168 |
+
|
| 169 |
+
Algorithm 3 Active IBT++ (LAN A to LAN B)
|
| 170 |
+
Input: Active IBT Rounds $R$; Merge Numbers $k_{1}$, $k_{2}$; Final Parallel Corpus $L^{++} = \{L_{A}, L_{B}\}$; Models $M_{A \rightarrow B, i}$, $M_{B \rightarrow A, i}$, $i \in \{1, 2, \dots, R\}$; Synthetic Corpus $\overleftarrow{P_{i}}$, $\overleftarrow{Q_{i}}$, $i \in \{1, 2, \dots, R\}$. Output: $M_{A \rightarrow B}$
|
| 171 |
+
1: for j in 1 to $k_{1}$ do
|
| 172 |
+
2: $\tilde{L}_{A,j} = M_{B \rightarrow A, R - j + 1}(L_{B})$ ;
|
| 173 |
+
3: $\tilde{L}_{B,j} = M_{A \rightarrow B, R - j + 1}(L_{A})$ ;
|
| 174 |
+
4: $L^{++} = L^{++} \cup \{\tilde{L}_{A,j}, L_{B}\} \cup \{L_{A}, \tilde{L}_{B,j}\}$
|
| 175 |
+
5: end for
|
| 176 |
+
6: for j in 1 to $k_{2}$ do
|
| 177 |
+
7: $L^{++} = L^{++} \cup \{\overleftarrow{P}_{R - j + 1}, \overleftarrow{Q}_{R - j + 1}\}$
|
| 178 |
+
8: end for
|
| 179 |
+
9: $M_{A \rightarrow B} = \text{Retrain } M_{A \rightarrow B, 1}$ on $L^{++}$
|
| 180 |
+
|
| 181 |
+
Active IBT++ Active learning aims at choosing informative sentences to train the model. Is there any way to extract more value from these selected sentences? Inspired by Nguyen et al. (2019), we propose further data augmentation techniques applied after Active IBT finishes. Models from the last $k_{1}$ rounds are used to translate the final parallel corpus, so that each selected sentence has diversified translations. We merge this diversified parallel corpus with the synthetic corpus of a specific translation direction from the last $k_{2}$ rounds. Duplicate sentence pairs are filtered out. The NMT model is then re-initialized and trained on this enlarged training corpus.
|
| 182 |
+
|
| 183 |
+
We name this technique Active IBT++ and summarize it in Algorithm 3. For simplicity, we only consider one translation direction in Algorithm 3; the same technique applies to the other direction.
|
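
The corpus enlargement of Algorithm 3 amounts to a few set unions. A minimal sketch for the A→B direction is given below, where `models_ab`/`models_ba` hold the per-round checkpoints and `synth` the per-round synthetic pairs; all three containers are hypothetical names, not part of the paper's code.

```python
def active_ibt_plus_plus_corpus(L_A, L_B, models_ab, models_ba, synth, k1, k2):
    """Build the enlarged corpus L++ of Algorithm 3; pairs are (source, target)."""
    corpus = set(zip(L_A, L_B))                   # the final parallel corpus
    for j in range(1, k1 + 1):                    # diversify with the last k1 models
        corpus |= set(zip(models_ba[-j](L_B), L_B))   # back-translate L_B
        corpus |= set(zip(L_A, models_ab[-j](L_A)))   # forward-translate L_A
    for j in range(1, k2 + 1):                    # merge the last k2 synthetic corpora
        corpus |= set(synth[-j])
    return list(corpus)                           # the set already removes duplicates
```
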
| 184 |
+
|
| 185 |
+
# 4 Experiments
|
| 186 |
+
|
| 187 |
+
# 4.1 Dataset, Preprocessing and Implementation
|
| 188 |
+
|
| 189 |
+
We experiment on three language pairs, namely German-English (DE-EN), Russian-English (RU-EN) and Lithuanian-English (LT-EN). To simulate active NMT training, we use parallel corpora from the WMT 2014 shared task (DE-EN, RU-EN) and the WMT 2019 shared task (LT-EN). For Russian-English, we randomly choose an extra 2M sentence pairs from the UN corpus<sup>1</sup>.
|
| 190 |
+
|
| 191 |
+

|
| 192 |
+
(a) news test 2014, DE-EN
|
| 193 |
+
|
| 194 |
+

|
| 195 |
+
(b) news test 2014, RU-EN
|
| 196 |
+
|
| 197 |
+

|
| 198 |
+
(c) news test 2019, LT-EN
|
| 199 |
+
|
| 200 |
+

|
| 201 |
+
(a) news test 2014, DE-EN
|
| 202 |
+
|
| 203 |
+

|
| 204 |
+
Figure 2: Active NMT, BLEU scores on the test dataset.
|
| 205 |
+
(b) news test 2014, RU-EN
|
| 206 |
+
|
| 207 |
+

|
| 208 |
+
(c) news test 2019, LT-EN
|
| 209 |
+
Figure 3: Active NMT with Transfer Learning, BLEU scores on the test dataset.
|
| 210 |
+
|
| 211 |
+
The number of sentence pairs in each language pair is 4M (DE-EN), 4M (RU-EN) and 0.8M (LT-EN). Tokenization is done with Moses$^2$. We employ BPE (Sennrich et al., 2016b) to generate a shared vocabulary for each language pair. The BPE merge operation numbers are 20K (LT-EN) and 32K (DE-EN, RU-EN). For active NMT with or without transfer learning, we only experiment on translating into English. For active iterative back-translation (IBT), evaluation is carried out on translating both from and into English. The evaluation metric is BLEU (Papineni et al., 2002).
|
| 212 |
+
|
| 213 |
+
Model hyperparameters are identical to Transformer base (Vaswani et al., 2017). The Adam optimizer (Kingma and Ba, 2014) is used with a learning rate of $7 \times 10^{-4}$. We use the same learning rate scheduling strategy as Vaswani et al. (2017) with a warmup of 4000 steps. During training, the label smoothing factor and the dropout probability are set to 0.1. $\lambda_{1}$ and $\lambda_{2}$ in Algorithm 1 are both set to 1.0.
|
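
For reference, the inverse-square-root schedule of Vaswani et al. (2017) with 4000 warmup steps can be written as follows; the constants here are illustrative defaults rather than values taken from our training scripts.

```python
def transformer_lr(step: int, d_model: int = 512, warmup: int = 4000) -> float:
    """Inverse-square-root learning rate schedule (Vaswani et al., 2017)."""
    step = max(step, 1)  # avoid division by zero on the first update
    return d_model ** -0.5 * min(step ** -0.5, step * warmup ** -1.5)
```
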
| 214 |
+
|
| 215 |
+
Our implementation is based on PyTorch<sup>3</sup>. All models are trained on 8 RTX 2080Ti GPUs with a mini-batch of 4096 tokens. In each active training round, we stop training if validation perplexity does not decrease for 10 epochs.
|
| 218 |
+
|
| 219 |
+
# 4.2 Active NMT
|
| 220 |
+
|
| 221 |
+
As a starting point, we empirically compare the different acquisition functions proposed in Section 3.2 and Section 3.3, as well as a uniformly random selection baseline. Twelve rounds of active NMT training are done. In each round, $1.67\%$ of the entire parallel corpus is selected and added to the training corpus, so the token budget reaches $20\%$ of the entire parallel corpus in the final round. The training corpus in the first round is identical across acquisition functions to ensure a fair comparison.
|
| 222 |
+
|
| 223 |
+
Results are shown in Figure 2. Most active acquisition functions outperform the random selection baseline in all three language pairs. Our model agnostic acquisition function (delfy) is also better than the best uncertainty based acquisition function. We try to combine delfy with well-performing uncertainty based acquisition functions, since they capture different aspects of a sentence's informativeness. We choose to combine delfy with token entropy (te). We add the ranks given by these two acquisition functions to avoid the magnitude problem.
|
| 224 |
+
|
| 225 |
+

|
| 226 |
+
(a) news test 2014, DE-EN
|
| 227 |
+
|
| 228 |
+

|
| 229 |
+
(b) news test 2014, RU-EN
|
| 230 |
+
|
| 231 |
+

|
| 232 |
+
(c) news test 2019, LT-EN
|
| 233 |
+
|
| 234 |
+

|
| 235 |
+
(d) news test 2014, EN-DE
|
| 236 |
+
|
| 237 |
+

|
| 238 |
+
(e) news test 2014, EN-RU
|
| 239 |
+
|
| 240 |
+

|
| 241 |
+
(f) news test 2019, EN-LT
|
| 242 |
+
Figure 4: Active Iterative Back-Translation, BLEU scores on the test dataset.
|
| 243 |
+
|
| 244 |
+
For example, if a sentence gets the highest delfy score as well as the second-highest te score, then its delfy rank is 1 and its te rank is 2, so its final score is $1 + 2 = 3$. Since we sort sentences in descending order of their scores, we multiply the rank sum by $-1$. This new combined acquisition function is named te-delfy.
|
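
The rank combination is straightforward to implement. In the sketch below, `te_scores` and `delfy_scores` are assumed to be dictionaries mapping each candidate sentence to its acquisition score (hypothetical names).

```python
def te_delfy_scores(te_scores: dict, delfy_scores: dict) -> dict:
    """Combine te and delfy by rank; a higher returned score is selected earlier."""
    def ranks(scores):
        order = sorted(scores, key=scores.get, reverse=True)
        return {s: r + 1 for r, s in enumerate(order)}  # rank 1 = highest score
    te_r, de_r = ranks(te_scores), ranks(delfy_scores)
    # negate the rank sum so that sorting in descending order still works
    return {s: -(te_r[s] + de_r[s]) for s in te_scores}
```
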
| 245 |
+
|
| 246 |
+
Our combined method (te-delfy) proves more effective, outperforming all other acquisition functions in every active NMT training round and in all three language pairs. Specifically, in the last active training round, te-delfy surpasses the best uncertainty based acquisition function by 1.4 BLEU points in DE-EN, 1.6 BLEU points in RU-EN and 1.1 BLEU points in LT-EN.
|
| 247 |
+
|
| 248 |
+
# 4.3 Active NMT with Transfer Learning
|
| 249 |
+
|
| 250 |
+
To evaluate different acquisition functions in active NMT with transfer learning, we start from a French to English NMT model. The parallel corpus for building this initial model contains 4M sentence pairs randomly selected from the WMT 2014 shared task. To share the vocabulary between languages, we latinize all Russian sentences<sup>4</sup>.
|
| 251 |
+
|
| 252 |
+
Figure 3 shows the results. All active acquisition functions remain advantageous compared with the random selection baseline, except total token entropy (tte). Our combined method (te-delfy) is also the best in most active training rounds. Te-delfy yields the best final results, beating the best uncertainty based acquisition function by 0.5 BLEU points in DE-EN, 0.3 in RU-EN and 0.5 in LT-EN. However, in active NMT with transfer learning, the performance gains brought by different acquisition functions are not as large as in active NMT (Section 4.2).
|
| 255 |
+
|
| 256 |
+
# 4.4 Active Iterative Back-Translation
|
| 257 |
+
|
| 258 |
+
For active iterative back-translation (IBT), we randomly select $10\%$ of the entire parallel corpus to train an initial NMT model. The initial model is shared across acquisition functions. We do 10 rounds of Active IBT training; in each round, $1\%$ of the entire parallel corpus is added to the training corpus, so the total token budget is still $20\%$, as in Section 4.2 and Section 4.3. For $\alpha$ in Algorithm 2, we use up to half the amount of the authentic parallel corpus in each Active IBT round. $k_{1}$ and $k_{2}$ in Algorithm 3 are set to 3 and 6, respectively.
|
| 259 |
+
|
| 260 |
+
Results are summarized in Figure 4. Our combined method (te-delfy) becomes even more powerful than in active NMT, leading all the way to the final round in all experiments.
|
| 261 |
+
|
| 262 |
+
| Method | Setting | DE→EN | EN→DE | RU→EN | EN→RU | LT→EN | EN→LT |
|---|---|---|---|---|---|---|---|
| Transformer Base | Entire Corpus | 32.5 | 27.3 | 33.9 | 36.6 | 24.2 | 20.3 |
| Random | Active IBT | 29.4 | 23.6 | 28.4 | 30.5 | 21.2 | 15.7 |
| Best Uncertainty | Active IBT | 31.5 | 25.5 | 32.1 | 33.9 | 23.0 | 19.5 |
| Delfy (Ours) | Active IBT | 31.3 | 26.1 | 32.0 | 34.4 | 23.6 | 20.0 |
| Te-delfy (Ours) | Active IBT | 31.9 | 26.9 | 33.5 | 36.1 | 23.8 | 20.3 |
| Te-delfy (Ours) | Active IBT++ | 32.8 | 27.4 | 35.0 | 37.4 | 25.4 | 21.3 |
|
| 263 |
+
|
| 264 |
+
Table 1: Comparison between Active IBT models in the final round, Active IBT++ models and the fully supervised Transformer. The best results are all achieved by te-delfy. The token budget is $20\%$ of the entire parallel corpus.
|
| 265 |
+
|
| 266 |
+

|
| 267 |
+
|
| 268 |
+

|
| 269 |
+
|
| 270 |
+

|
| 271 |
+
(a) DE-EN
|
| 272 |
+
|
| 273 |
+

|
| 274 |
+
Figure 5: Text analysis of selected sentences, including average sentence length, vocabulary coverage and MTLD score.
|
| 275 |
+
|
| 276 |
+

|
| 277 |
+
|
| 278 |
+

|
| 279 |
+
(b) RU-EN
|
| 280 |
+
|
| 281 |
+

|
| 282 |
+
|
| 283 |
+

|
| 284 |
+
|
| 285 |
+

|
| 286 |
+
|
| 287 |
+

|
| 288 |
+
(c) LT-EN
|
| 289 |
+
|
| 290 |
+

|
| 291 |
+
|
| 292 |
+

|
| 293 |
+
|
| 294 |
+
All active acquisition functions we try surpass the random baseline by a large margin, with a minimum performance gain of 1.1 BLEU points. We argue that synthetic sentence pairs need more sophisticated selection criteria than authentic ones: low-quality pseudo-parallel data can damage rather than help model performance.
|
| 295 |
+
|
| 296 |
+
We compare the actively learned models with the fully supervised Transformer in Table 1. The best results are all achieved by te-delfy, which further demonstrates its superiority. Active IBT++ (Algorithm 3) is applied with te-delfy. With a token budget of $20\%$ of the entire parallel corpus, we surpass the vanilla Transformer in every translation direction. These results show that Active IBT and Active IBT++ are promising approaches for enhancing NMT models.
|
| 297 |
+
|
| 298 |
+
# 5 Analysis
|
| 299 |
+
|
| 300 |
+
# 5.1 Linguistic Features
|
| 301 |
+
|
| 302 |
+
In order to find the common features of the beneficial sentences in translation, we analyze the final parallel corpus constructed by different acquisition functions in active NMT from four aspects. All analyses are done at the word level instead of the subword level. First, we study the impact of average sentence length. Second, we study the vocabulary coverage by calculating the ratio of the vocabulary size of the selected corpus to the total/test vocabulary size. Finally, the lexical diversity of the selected corpus is analyzed with the MTLD metric (McCarthy and Jarvis, 2010). Analyses are done for random selection, the best uncertainty based method, delfy and te-delfy. The results are shown in Figure 5.
|
| 305 |
+
|
| 306 |
+
Most algorithms tend to choose medium-length sentences rather than extremely long or short ones. We also tried using sentence length itself as an acquisition function (choosing the longest or shortest sentences), which performs poorly (Appendix A). Vocabulary coverage varies among acquisition functions, with random selection always being the lowest. Higher vocabulary coverage means fewer unseen words, which might yield a more knowledgeable model. Also, delfy and te-delfy always achieve higher MTLD scores than the other two methods. Note that higher vocabulary coverage does not necessarily mean a higher diversity score: in LT-EN and RU-EN, delfy always has a larger vocabulary size than te-delfy, but its selected corpus is less diverse. In general, a good acquisition function should favor medium-length sentences and achieve large vocabulary coverage. Meanwhile, a diversified training corpus is also beneficial to model performance.
|
| 309 |
+
|
| 310 |
+
| Methods | Easy→Hard | Hard→Easy |
|---|---|---|
| lc | 16.0 | 17.5 |
| margin | 16.3 | 18.3 |
| te | 15.9 | 18.7 |
| tte | 16.1 | 18.6 |
| delfy | 16.9 | 19.1 |
| te-delfy | 16.0 | 19.8 |
|
| 311 |
+
|
| 312 |
+
Table 2: We validate the necessity of active learning under a limited human translation budget. Hard $\rightarrow$ Easy corresponds to active learning; Easy $\rightarrow$ Hard represents reverse active learning. We experiment on EN-LT with a token budget of $20\%$ of the entire parallel corpus. Active learning results are always better than reverse active learning results.
|
| 313 |
+
|
| 314 |
+
# 5.2 Reverse Active Learning
|
| 315 |
+
|
| 316 |
+
Active learning chooses difficult samples for the model. In contrast, several curriculum learning methods (Zhang et al., 2018; Platanios et al., 2019; Liu et al., 2020; Zhou et al., 2020) accelerate model convergence by starting training with easy samples and gradually moving to hard ones. Curriculum learning's success makes it reasonable to ask whether the reverse of active learning is also beneficial. Reverse active learning selects the sentences with the lowest acquisition function scores in each round. We compare active learning and reverse active learning in Table 2. Reverse active learning lags behind active learning for every acquisition function we try; it also cannot beat the random baseline of 18.5 BLEU points. Curriculum learning emphasizes the training process of networks (easy to hard), which might accelerate convergence. However, when the amount of training data is limited, active learning is the better choice.
|
| 317 |
+
|
| 318 |
+
# 6 Conclusion
|
| 319 |
+
|
| 320 |
+
Various acquisition functions are evaluated on active NMT, active NMT with transfer learning and active iterative back-translation (IBT). Our experimental results strongly suggest that active learning is beneficial to NMT. Our combined method (te-delfy) achieves the best final BLEU score in every experiment we run. Also, the proposed Active IBT++ framework efficiently exploits the selected parallel corpus to further enhance model accuracy. These techniques may also be useful for unsupervised NMT: active pre-training is worth trying, and active IBT has already proven its capability. We leave the study of more acquisition functions in more NMT scenarios for future work.
|
| 323 |
+
|
| 324 |
+
# Acknowledgments
|
| 325 |
+
|
| 326 |
+
Yuekai Zhao and Zhihua Zhang have been supported by the Beijing Natural Science Foundation (Z190001), National Key Research and Development Project of China (No. 2018AAA0101004), and Beijing Academy of Artificial Intelligence (BAAI).
|
| 327 |
+
|
| 328 |
+
# References
|
| 329 |
+
|
| 330 |
+
Mikel Artetxe, Gorka Labaka, Eneko Agirre, and Kyunghyun Cho. 2018. Unsupervised neural machine translation. In Proceedings of the Sixth International Conference on Learning Representations.
|
| 331 |
+
Tianchi Bi, Hao Xiong, Zhongjun He, Hua Wu, and Haifeng Wang. 2019. Multi-agent learning for neural machine translation. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 856-865, Hong Kong, China. Association for Computational Linguistics.
|
| 332 |
+
Miguel Domingo, Mercedes García-Martínez, Amando Estela, Laurent Bié, Alexandre Helle, Álvaro Peris, Francisco Casacuberta, and Manuel Herranz. 2019. Demonstration of a neural machine translation system with online learning for translators. arXiv preprint arXiv:1906.09000.
|
| 333 |
+
Zi-Yi Dou, Antonios Anastasopoulos, and Graham Neubig. 2020. Dynamic data selection and weighting for iterative back-translation. ArXiv, abs/2004.03672.
|
| 334 |
+
Marzieh Fadaee and Christof Monz. 2018. Back-translation sampling by targeting difficult words in neural machine translation. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 436–446, Brussels, Belgium. Association for Computational Linguistics.
|
| 335 |
+
Jonas Gehring, Michael Auli, David Grangier, Denis Yarats, and Yann N Dauphin. 2017. Convolutional sequence to sequence learning. In Proceedings of the 34th International Conference on Machine Learning-Volume 70, pages 1243-1252. JMLR.org.
|
| 336 |
+
Di He, Yingce Xia, Tao Qin, Liwei Wang, Nenghai Yu, Tie-Yan Liu, and Wei-Ying Ma. 2016. Dual learning for machine translation. In Advances in Neural Information Processing Systems, pages 820-828.
|
| 337 |
+
|
| 338 |
+
Vu Cong Duy Hoang, Philipp Koehn, Gholamreza Haffari, and Trevor Cohn. 2018. Iterative backtranslation for neural machine translation. In Proceedings of the 2nd Workshop on Neural Machine Translation and Generation, pages 18-24, Melbourne, Australia. Association for Computational Linguistics.
|
| 339 |
+
Yunsu Kim, Yingbo Gao, and Hermann Ney. 2019. Effective cross-lingual transfer of neural machine translation models without shared vocabularies. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 1246–1257, Florence, Italy. Association for Computational Linguistics.
|
| 340 |
+
Diederik P. Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980. Published as a conference paper at the 3rd International Conference on Learning Representations, San Diego, 2015.
|
| 341 |
+
Julia Kreutzer, Shahram Khadivi, Evgeny Matusov, and Stefan Riezler. 2018. Can neural machine translation be improved with user feedback? arXiv preprint arXiv:1804.05958.
|
| 342 |
+
Julia Kreutzer and Stefan Riezler. 2019. Self-regulated interactive sequence-to-sequence learning. arXiv preprint arXiv:1907.05190.
|
| 343 |
+
Guillaume Lample and Alexis Conneau. 2019. Crosslingual language model pretraining. Advances in Neural Information Processing Systems (NeurIPS).
|
| 344 |
+
Guillaume Lample, Alexis Conneau, Ludovic Denoyer, and Marc'Aurelio Ranzato. 2017. Unsupervised machine translation using monolingual corpora only. arXiv preprint arXiv:1711.00043.
|
| 345 |
+
Ming Liu, Wray Buntine, and Gholamreza Haffari. 2018. Learning to actively learn neural machine translation. In Proceedings of the 22nd Conference on Computational Natural Language Learning, pages 334-344.
|
| 346 |
+
Xuebo Liu, Houtim Lai, Derek F. Wong, and Lidia S. Chao. 2020. Norm-based curriculum learning for neural machine translation. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 427-436, Online. Association for Computational Linguistics.
|
| 347 |
+
Thang Luong, Hieu Pham, and Christopher D. Manning. 2015. Effective approaches to attention-based neural machine translation. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 1412-1421, Lisbon, Portugal. Association for Computational Linguistics.
|
| 348 |
+
Philip M. McCarthy and Scott Jarvis. 2010. Mtld, vocd-d, and hd-d: A validation study of sophisticated approaches to lexical diversity assessment. Behavior Research Methods, 42(2):381-392.
|
| 349 |
+
|
| 350 |
+
Xuan-Phi Nguyen, Shafiq Joty, Wu Kui, and Ai Ti Aw. 2019. Data diversification: An elegant strategy for neural machine translation.
|
| 351 |
+
Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th annual meeting on association for computational linguistics, pages 311-318. Association for Computational Linguistics.
|
| 352 |
+
Álvaro Peris and Francisco Casacuberta. 2018. Active learning for interactive neural machine translation of data streams. In Proceedings of the 22nd Conference on Computational Natural Language Learning, pages 151-160, Brussels, Belgium. Association for Computational Linguistics.
|
| 353 |
+
Pavel Petrushkov, Shahram Khadivi, and Evgeny Matusov. 2018. Learning from chunk-based feedback in neural machine translation. arXiv preprint arXiv:1806.07169.
|
| 354 |
+
Minh Quang Pham, Josep M Crego, Jean Senellart, and François Yvon. 2018. Fixing translation divergences in parallel corpora for neural mt. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 2967-2973.
|
| 355 |
+
Emmanouil Antonios Platanios, Otilia Stretcu, Graham Neubig, Barnabas Poczos, and Tom Mitchell. 2019. Competence-based curriculum learning for neural machine translation. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 1162-1172, Minneapolis, Minnesota. Association for Computational Linguistics.
|
| 356 |
+
Alberto Poncelas, Gideon Maillette de Buy Wenniger, and Andy Way. 2019. Adaptation of machine translation models with back-translated data using transductive data selection methods. arXiv preprint arXiv:1906.07808.
|
| 357 |
+
Ameya Prabhu, Charles Dognin, and Maneesh Singh. 2019. Sampling bias in deep active classification: An empirical study. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 4058-4068, Hong Kong, China. Association for Computational Linguistics.
|
| 358 |
+
Dongyu Ru, Yating Luo, Lin Qiu, Hao Zhou, Lei Li, Weinan Zhang, and Yong Yu. 2020. Active sentence learning by adversarial uncertainty sampling in discrete space. ArXiv, abs/2004.08046.
|
| 359 |
+
Dana Ruiter, Cristina España-Bonet, and Josef van Genabith. 2019. Self-supervised neural machine translation. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 1828-1834.
|
| 360 |
+
|
| 361 |
+
Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016a. Improving neural machine translation models with monolingual data. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 86-96, Berlin, Germany. Association for Computational Linguistics.
|
| 362 |
+
Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016b. Neural machine translation of rare words with subword units. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1715-1725, Berlin, Germany. Association for Computational Linguistics.
|
| 363 |
+
Burr Settles and Mark Craven. 2008. An analysis of active learning strategies for sequence labeling tasks. In Proceedings of the Conference on Empirical Methods in Natural Language Processing, EMNLP '08, page 1070-1079, USA. Association for Computational Linguistics.
|
| 364 |
+
Yanyao Shen, Hyokun Yun, Zachary Lipton, Yakov Kronrod, and Animashree Anandkumar. 2017. Deep active learning for named entity recognition. In Proceedings of the 2nd Workshop on Representation Learning for NLP, pages 252-256, Vancouver, Canada. Association for Computational Linguistics.
|
| 365 |
+
Aditya Siddhant and Zachary C. Lipton. 2018. Deep bayesian active learning for natural language processing: Results of a large-scale empirical study. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 2904-2909, Brussels, Belgium. Association for Computational Linguistics.
|
| 366 |
+
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information processing systems, pages 5998-6008.
|
| 367 |
+
Rui Wang, Masao Utiyama, and Eiichiro Sumita. 2018a. Dynamic sentence sampling for efficient training of neural machine translation. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 298-304.
|
| 368 |
+
Wei Wang, Isaac Caswell, and Ciprian Chelba. 2019. Dynamically composing domain-data selection with clean-data selection by "co-curricular learning" for neural machine translation. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 1282-1292, Florence, Italy. Association for Computational Linguistics.
|
| 369 |
+
Wei Wang, Taro Watanabe, Macduff Hughes, Tetsuji Nakagawa, and Ciprian Chelba. 2018b. Denoising neural machine translation training with trusted data and online data selection. In Proceedings of the Third Conference on Machine Translation: Research Papers, pages 133-143, Brussels, Belgium. Association for Computational Linguistics.
|
| 370 |
+
|
| 371 |
+
Xinyi Wang and Graham Neubig. 2019. Target conditioned sampling: Optimizing data selection for multilingual neural machine translation. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 5823-5828, Florence, Italy. Association for Computational Linguistics.
|
| 372 |
+
Marlies van der Wees, Arianna Bisazza, and Christof Monz. 2017. Dynamic data selection for neural machine translation. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 1400-1410, Copenhagen, Denmark. Association for Computational Linguistics.
|
| 373 |
+
Felix Wu, Angela Fan, Alexei Baevski, Yann N. Dauphin, and Michael Auli. 2019. Pay less attention with lightweight and dynamic convolutions. CoRR, abs/1901.10430.
|
| 374 |
+
Xuan Zhang, Gaurav Kumar, Huda Khayrallah, Kenton Murray, Jeremy Gwinnup, Marianna J Martindale, Paul McNamee, Kevin Duh, and Marine Carpuat. 2018. An empirical exploration of curriculum learning for neural machine translation. arXiv preprint arXiv:1811.00739.
|
| 375 |
+
Ye Zhang, Matthew Lease, and Byron C Wallace. 2017. Active discriminative text representation learning. In Thirty-First AAAI Conference on Artificial Intelligence.
|
| 376 |
+
Yikai Zhou, Baosong Yang, Derek F. Wong, Yu Wan, and Lidia S. Chao. 2020. Uncertainty-aware curriculum learning for neural machine translation. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 6934-6944, Online. Association for Computational Linguistics.
|
| 377 |
+
Barret Zoph, Deniz Yuret, Jonathan May, and Kevin Knight. 2016. Transfer learning for low-resource neural machine translation. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 1568-1575, Austin, Texas. Association for Computational Linguistics.
|
activelearningapproachestoenhancingneuralmachinetranslation/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:aeeaca59e5aca53d71669d2189db8997ed2d84e3ab0cc44a16d6f4bc87839cdc
|
| 3 |
+
size 476322
|
activelearningapproachestoenhancingneuralmachinetranslation/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ef618346f7303b2ceccc68b0b390dfa3cffe90456c63aff9cb604cedde9a7742
|
| 3 |
+
size 448075
|
activesentencelearningbyadversarialuncertaintysamplingindiscretespace/2b4fe128-e55a-47fa-b1d6-7f9a308c4815_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5d9f5e95fa94de0847c62582ed29596446d92a2fbf33f329be47a5861af3dcfd
|
| 3 |
+
size 65299
|
activesentencelearningbyadversarialuncertaintysamplingindiscretespace/2b4fe128-e55a-47fa-b1d6-7f9a308c4815_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c11e05eb4532bdd3f422b93213a0505d3f67ef684ef370700a7846a6233a87ce
|
| 3 |
+
size 79311
|
activesentencelearningbyadversarialuncertaintysamplingindiscretespace/2b4fe128-e55a-47fa-b1d6-7f9a308c4815_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:18e1fc9bdb4b8188de0c75c8eb99eb259254294debcfce78d009545ea7ca6ee7
|
| 3 |
+
size 370240
|
activesentencelearningbyadversarialuncertaintysamplingindiscretespace/full.md
ADDED
|
@@ -0,0 +1,274 @@
|
| 1 |
+
# Active Sentence Learning by Adversarial Uncertainty Sampling in Discrete Space
|
| 2 |
+
|
| 3 |
+
Dongyu Ru†,‡, Jiangtao Feng†, Lin Qiu‡, Hao Zhou†, Mingxuan Wang†, Weinan Zhang‡, Yong Yu‡, Lei Li†

† ByteDance AI Lab
|
| 4 |
+
|
| 5 |
+
{fengjiangtao, zhouhao.nlp, wangmingxuan.89, lileilab}@bytedance.com
|
| 6 |
+
|
| 7 |
+
$^{\ddagger}$ Shanghai Jiao Tong University
|
| 8 |
+
|
| 9 |
+
{maxru,lqiu,wnzhang,yyu}@apex.sjtu.edu.cn
|
| 10 |
+
|
| 11 |
+
# Abstract
|
| 12 |
+
|
| 13 |
+
Active learning for sentence understanding aims at discovering informative unlabeled data for annotation, thereby reducing the demand for labeled data. We argue that the typical uncertainty sampling method for active learning is time-consuming and can hardly work in real time, which may lead to ineffective sample selection. We propose adversarial uncertainty sampling in discrete space (AUSDS) to retrieve informative unlabeled samples more efficiently. AUSDS maps sentences into a latent space generated by popular pre-trained language models, and discovers informative unlabeled text samples for annotation via adversarial attack. The proposed approach is extremely efficient compared with traditional uncertainty sampling, with more than 10x speedup. Experimental results on five datasets show that AUSDS outperforms strong baselines on effectiveness.
|
| 14 |
+
|
| 15 |
+
# 1 Introduction
|
| 16 |
+
|
| 17 |
+
Deep neural models have become popular in natural language processing (Peters et al., 2018; Radford et al., 2018; Devlin et al., 2018). Neural models usually consume massive labeled data, which requires a huge amount of human labor. But data are not born equal: informative data with high uncertainty are decisive for the decision boundary and are worth labeling. Thus, selecting such worth-labeling data from an unlabeled text corpus for annotation is an effective way to reduce human labor and obtain informative data.
|
| 18 |
+
|
| 19 |
+
Active learning approaches are a straightforward choice for reducing such human labor. Previous works, such as uncertainty sampling (Lewis and Gale, 1994), need to traverse all unlabeled data to find informative unlabeled samples, which are always near the decision boundary with large entropy. However, the traversal process is very time-consuming and thus cannot be executed frequently (Settles and Craven, 2008). A common choice is to perform the sampling process at fixed intervals: sample and label informative unlabeled data, then train the model until convergence (Deng et al., 2018).
|
| 22 |
+
|
| 23 |
+
We argue that performing uncertainty sampling infrequently may lead to the "ineffective sampling" problem: in the early phase of training, the decision boundary changes quickly, which makes previously collected samples less effective after several updates of the model. Ideally, uncertainty sampling should be performed frequently in the early phase of model training.
|
| 24 |
+
|
| 25 |
+
In this paper, we propose adversarial uncertainty sampling in discrete space (AUSDS) to address the ineffective sampling problem for active sentence learning by introducing more frequent sampling with significantly lower costs. Specifically, we propose to leverage adversarial attacks (Goodfellow et al., 2014; Kurakin et al., 2016) for selecting informative samples with high uncertainty, which significantly narrows down the search space. Fig. 1 shows the difference between uncertainty sampling and AUSDS. Typical uncertainty sampling (Fig. 1.a) traverses all the unlabeled samples in each sampling run to obtain samples of high uncertainty, which is costly with time complexity $O(\text{Unlabeled Data Size})$. AUSDS (Fig. 1.b) first projects a labeled text to the decision boundary, denoted as an adversarial data point, and then searches the nearest neighbors of this point. The computational cost of AUSDS is significantly smaller than that of typical uncertainty sampling, with time complexity $O(\text{Batch Size})$. But it is non-trivial for AUSDS to perform adversarial attacks, which require adversarial gradients on sentences, since texts live in a discrete space.
|
| 26 |
+
|
| 27 |
+

|
| 28 |
+
(a) Uncertainty sampling: enumerating all unlabeled data to find the most uncertain samples
|
| 29 |
+
|
| 30 |
+

|
| 31 |
+
(b) AUSDS: adversarial attack over current batch + KNN search in pre-trained LM space
|
| 32 |
+
Figure 1: Comparison between uncertainty sampling and AUSDS for active learning.
|
| 33 |
+
|
| 34 |
+
We propose to include a pre-trained neural encoder, such as BERT (Devlin et al., 2018), to map unlabeled sentences into a continuous space, over which the adversarial attack is performed. Since not every adversarial data point in the encoding space can be mapped back to one of the unlabeled sentences, we use the k-nearest neighbor (KNN) algorithm (Altman, 1992) to find the unlabeled sentences most similar to the adversarial data points (the adversarial samples). Besides, empirically, we mix some random samples into the uncertainty samples to alleviate the sampling bias issue mentioned by Huang et al. (2010). Finally, the mixed samples are sent to an oracle annotator to obtain their labels and are appended to the labeled data set.
|
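
A minimal PyTorch sketch of this attack-then-retrieve step is shown below; `encoder`, `classifier`, and the precomputed `bank` of unlabeled-sentence encodings are hypothetical stand-ins for the actual components, and the one-step FGSM-style perturbation is just one of the attack variants discussed above.

```python
import torch
import torch.nn.functional as F

def ausds_candidates(encoder, classifier, bank, sents, labels, eps=0.1, k=5):
    """FGSM-style step in latent space, then KNN over unlabeled encodings."""
    h = encoder(sents).detach().requires_grad_(True)   # latent states (B, d)
    loss = F.cross_entropy(classifier(h), labels)
    loss.backward()                        # gradients w.r.t. the latent states
    adv = h + eps * h.grad.sign()          # push latent states toward the boundary
    dists = torch.cdist(adv, bank)         # distances to unlabeled encodings (B, N)
    return dists.topk(k, largest=False).indices   # k nearest unlabeled sentences
```
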
| 35 |
+
|
| 36 |
+
We deploy AUSDS for active sentence learning and conduct experiments on five datasets across two NLP tasks, namely sequence classification and sequence labeling. Experimental results show that AUSDS outperforms random sampling and uncertainty sampling strategies.
|
| 37 |
+
|
| 38 |
+
Our contributions are summarized as follows:
|
| 39 |
+
|
| 40 |
+
- We propose AUSDS for active sentence learning, which first introduces the adversarial attack for sentence uncertainty sampling, alleviating the ineffective sampling problem.
|
| 41 |
+
- We propose to map sentences into the pre-trained LM encoding space, which makes adversarial uncertainty sampling available in the discrete sentence space.
|
| 42 |
+
- Experimental results demonstrate that our active sentence learning framework by AUSDS, which we call AUSDS learning framework, outperforms strong baselines in sampling effectiveness with acceptable running time.
|
| 43 |
+
|
| 44 |
+
# 2 Related Work
|
| 45 |
+
|
| 46 |
+
This work focuses on reducing the labeled data size with the help of pre-trained LMs in solving sentence learning tasks. The proposed AUSDS approach is related to two research topics: active learning and adversarial attacks.
|
| 47 |
+
|
| 48 |
+
# 2.1 Active Learning
|
| 49 |
+
|
| 50 |
+
Active learning algorithms can be categorized into three scenarios: membership query synthesis, stream-based selective sampling, and pool-based active learning (Settles, 2009). Our work is most closely related to pool-based active learning, which assumes that a small set of labeled data and a large pool of unlabeled data are available (Lewis and Gale, 1994). To reduce the demand for annotations, the learner starts from the labeled data, selects one or more queries from the unlabeled data pool for annotation, learns from the newly labeled data, and repeats.
|
| 51 |
+
|
| 52 |
+
The pool-based active learning scenario has been studied in many real-world applications, such as text classification (Lewis and Gale, 1994; Hoi et al., 2006), information extraction (Settles and Craven, 2008) and image classification (Joshi et al., 2009). Among the query strategies of existing active learning approaches, the uncertainty sampling strategy (Joshi et al., 2009; Lewis and Gale, 1994) is the most popular and widely used. The basic idea of uncertainty sampling is to enumerate the unlabeled samples and compute the uncertainty measurement like information entropy for each sample. The enumeration and uncertainty computation makes the sampling process costly and cannot be performed frequently, which induced the ineffective sampling problem.

There are some works that focus on accelerating the costly uncertainty sampling process. Jain et al. (2010) propose a hashing method to accelerate the sampling process in sub-linear time. Deng et al. (2018) propose to train an adversarial discriminator that selects informative samples directly, avoiding the rather costly computation of sequence entropy. Nevertheless, these works are still computationally expensive and cannot be performed frequently, so the ineffective sampling problem remains.

Figure 2: Overview of the active sentence learning framework with AUSDS. Some notations are labeled along with the corresponding components.

# 2.2 Adversarial Attack

Adversarial attacks were originally designed to approximate the smallest perturbation of a given latent state that crosses the decision boundary (Goodfellow et al., 2014; Kurakin et al., 2016). As machine learning models are often vulnerable to adversarial samples, adversarial attacks have served as an important surrogate for evaluating the robustness of deep learning models before deployment (Biggio et al., 2013; Szegedy et al., 2013). Existing adversarial attack approaches fall into three groups: one-step gradient-based approaches (Goodfellow et al., 2014; Rozsa et al., 2016), iterative methods (Kurakin et al., 2016), and optimization-based methods (Szegedy et al., 2013).

Inspired by the shared goal of adversarial attacks and uncertainty sampling, in this paper we propose to combine the two, treating adversarial attacks not as a threat but as a means of achieving real-time uncertainty sampling. Some works share a similar but different idea. Li et al. (2018) introduce active learning strategies into black-box attacks to enhance query efficiency. Pal et al. (2020) also use active learning strategies to reduce the number of queries for model extraction attacks. Zhu and Bento (2017) propose to train generative adversarial networks that generate samples by directly minimizing the distance to the decision boundary, which belongs to the query synthesis scenario and thus differs from ours. Ducoffe and Precioso (2018) also introduce adversarial attacks into active learning by augmenting the training set with adversarial samples of unlabeled data, which is infeasible in discrete space. Note that none of the works above share the same scenario as our problem setting.

# 3 Active Sentence Learning with AUSDS

We propose the AUSDS learning framework, an efficient and effective computational framework for active sentence learning, whose overview is shown in Fig. 2. The framework consists of two blocks: a training block and a sampling block (AUSDS). The training block learns from the labeled data, whereas the sampling block retrieves valuable unlabeled samples, whose latent states are close to the decision boundary over the latent space, from the unlabeled text corpus. Note that the definition of the latent space can differ across encoders and tasks. The samples retrieved by the sampling block are sent to an oracle annotator to obtain their labels, and the newly labeled samples are appended to the labeled data.

In this section, we first introduce the AUSDS method by showing how it selects samples that are critical to the decision boundary over the latent space. We then present the computational procedure of the full-fledged framework in detail.

Algorithm 1 Active Sentence Learning with Adversarial Uncertainty Sampling in Discrete Space

Input: an unlabeled text corpus $T_0$, an oracle $O$, labeled data $D_0 = \{(s, O(s)) \mid s \in S_0\}$ built from a small initial text corpus $S_0$, a pre-trained LM $f_e$, a fine-tuning interval $j$, and a fine-tuning step count $k$.

Output: a well-trained model $f = (f_e, f_d)$

1: Train $f_d$ on $D_0$ with frozen $f_e$;
2: Construct a discrete bijective mapper $M$, where $M(s) = f_e(s) \in \mathcal{H}$ and $M^{-1}(f_e(s)) = s \in T_0$;
3: Sample a training batch $B_0$ from $D_0$;
4: $i \gets 0$
5: while $|T_i| > 0$ do
6:     Train decoder $f_d$ on $B_i$ with frozen encoder $f_e$;
7:     Generate adversarial data points $A \subset \mathcal{H}$ using the adversarial attack algorithm;
8:     Retrieve adversarial samples $S_a = \{s_a = M^{-1}(x) \in T_i \mid x \in \mathrm{KNN}(A)\}$;
9:     Inject $S_a$ with random samples $S_r$, where $|S_a| : |S_r| = p : 1 - p$;
10:    Select the top-k ranked samples $S_{add}$ from $S_a \cup S_r$ w.r.t. information entropy;
11:    Label new data $Q \gets \{(s, O(s)) \mid s \in S_{add}\}$;
12:    Update labeled data $D_{i+1} \gets D_i \cup Q$;
13:    Remove newly labeled data from the unlabeled dataset: $T_{i+1} \gets T_i - S_{add}$;
14:    Sample a training batch $B_{i+1}$ from $Q$ and $D_{i+1}$ with ratio $q : 1 - q$;
15:    if $i \bmod j = 0$ then
16:        Fine-tune $f$ on $D_{i+1}$ for $k$ steps;
17:        Update the mapper $M$ with the fine-tuned encoder $f_e$ and text corpus $T_{i+1}$;
18:    end if
19:    $i \gets i + 1$
20: end while

# 3.1 AUSDS

AUSDS first defines a latent space over which sentences are distinguishable according to the model's decision boundary. The latent space is usually determined by the encoder architecture and the downstream task; we detail the latent space definitions for specific encoders and tasks in Sec. 4.1.

We first sample a batch of labeled texts and compute their representations, as well as the gradients of those representations, in the latent space. Using the latent states and their gradients, we perform adversarial attacks to generate adversarial data points $A$ near the decision boundary in the latent space. The attacks are performed with the following existing approaches:

- Fast Gradient Value (FGV) (Rozsa et al., 2016): a one-step gradient-based approach with high efficiency (a minimal sketch follows the list below). The adversarial data points are generated by:

$$
\mathbf{x}' = \mathbf{x} + \lambda \cdot \nabla_{\mathbf{x}} F_d(\mathbf{x}) \tag{1}
$$

where $\lambda$ is a hyperparameter and $F_d$ is the cross-entropy loss at $\mathbf{x}$.

- DeepFool (Moosavi-Dezfooli et al., 2016): an iterative approach that finds the minimal perturbation sufficient to change the estimated label.

- C&W (Carlini and Wagner, 2017): an optimization-based approach whose optimization problem is defined as:

$$
\text{minimize} \; D(\mathbf{x}, \mathbf{x}') + c \cdot g(\mathbf{x}') \tag{2}
$$

where $g(\cdot)$ is a manually designed function satisfying $g(\mathbf{x}) \leq 0$ if and only if $\mathbf{x}$'s label is a specific target label, and $D$ is a distance measurement such as the Minkowski distance.
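
As a concrete illustration of the cheapest of these attacks, the following is a minimal PyTorch sketch of the FGV update in Eq. 1; it assumes a decoder `f_d` mapping latent states to class logits, and the names `fgv_attack` and `lam` are illustrative, not part of the paper's released code.

```python
# A minimal sketch of FGV over latent states (Eq. 1), assuming a PyTorch
# decoder f_d that maps latent states x to logits; lam plays the role of λ.
import torch
import torch.nn.functional as F

def fgv_attack(f_d, x, y, lam=0.5):
    """Push latent states x toward the decision boundary along the loss gradient."""
    x = x.detach().requires_grad_(True)
    loss = F.cross_entropy(f_d(x), y)   # F_d(x): cross-entropy at x
    loss.backward()                      # populates x.grad with ∇_x F_d(x)
    return (x + lam * x.grad).detach()   # adversarial data points A
```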

FGV is efficient to compute, whereas the other two methods typically find more precise adversarial data points at larger computational cost. We use all three in our experiments to show the effectiveness of AUSDS.

In our sentence learning scenario, the adversarial data points $A$ cannot be grounded in real natural language text samples. Thus we perform a k-nearest neighbor (KNN) search (Altman, 1992) to find unlabeled text samples whose latent states are nearest to the adversarial data points $A$.

We implement the KNN search using Faiss<sup>1</sup> (Johnson et al., 2017), an efficient similarity search library with GPU support. The computational cost of the KNN search comes from two processes: constructing a sample mapper $M$ between the text and latent spaces, and searching for latent states similar to the adversarial data points. The sample mapper $M$ is constructed as a hash map, which is computationally efficient, to memorize the mapping between an unlabeled text $s$ and its latent representation $\mathbf{x}$. The mapper is only reconstructed when the encoder is updated, and infrequent encoder updates contribute to efficiency. The search itself is also fast ($100\times$ faster than generating $A$) thanks to Faiss. It is therefore possible to perform AUSDS frequently, at the batch level, without harming overall computation.
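
A minimal sketch of this KNN step with Faiss is shown below; the random arrays stand in for the encoder outputs of the unlabeled corpus and for the adversarial data points $A$, and the dimensionality is only an assumption (768 matches BERT-base).

```python
# A minimal Faiss sketch of the KNN search over latent states; `latents`
# stands in for f_e(T_i) and `adv_points` for A (both float32 arrays).
import numpy as np
import faiss

d = 768                                                 # assumed latent dimensionality
latents = np.random.rand(10000, d).astype("float32")    # stand-in for encoded unlabeled corpus
adv_points = np.random.rand(32, d).astype("float32")    # stand-in for adversarial points A

index = faiss.IndexFlatL2(d)            # exact L2 index; rebuilt only when f_e changes
index.add(latents)
_, neighbor_ids = index.search(adv_points, 5)  # ids index back into the mapper M
```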

After acquiring the adversarial samples $S_a$ via the KNN search, we mix $S_a$ with random samples $S_r$ drawn from the unlabeled text corpus $T_i$ at a ratio of $p : 1 - p$, where $p$ is a hyperparameter determined on the development set. The motivation for appending random samples is to balance exploration and exploitation, which prevents the model from continuously retrieving samples in a small neighborhood.

We then perform top-k ranking over the information entropy of the mixed samples to retrieve those with the highest uncertainty. Since the size of the mixed sample set is comparable to the batch size, the computational cost is acceptable. The remaining samples are sent to an oracle annotator $O$ to obtain their labels.
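
The mixing and top-k entropy ranking can be condensed as in the following sketch; `entropy` is assumed to be a caller-supplied function returning the predictive entropy of a sample, and all names are illustrative.

```python
# A minimal sketch of the mix-then-rank step, assuming `adv_samples` and
# `unlabeled` are lists of sample ids and `entropy` maps an id to its
# predictive entropy under the current model.
import random

def select_for_annotation(adv_samples, unlabeled, entropy, p=0.9, k=32):
    n_random = max(1, int(len(adv_samples) * (1 - p) / p))  # |S_a| : |S_r| = p : 1-p
    mixed = adv_samples + random.sample(unlabeled, n_random)
    return sorted(mixed, key=entropy, reverse=True)[:k]      # highest uncertainty first
```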

# 3.2 Active Learning Framework

The overall procedure of the proposed framework equipped with AUSDS is outlined in Algorithm 1.

Initialization The initialization stage is shown in Algorithm 1, lines 1-4. We first initialize our encoder $f_e$ with a pre-trained LM, which can be $\mathrm{BERT}_{\mathrm{BASE}}$ (Devlin et al., 2018) or ELMo (Peters et al., 2018). The decoder is built on top of the latent space and is randomly initialized. After building the neural model architecture, we train only the decoder on the existing labeled data $D_0$ to compute an initial decision boundary on the latent space. Meanwhile, we construct an initial discrete sample mapper $M$ for the sampling block. Finally, we sample a training batch $B_0$ from the labeled data $D_0$ and set the current training step $i$ to 0.

Training The training stage is shown in Algorithm 1, line 6. Given the decoder $f_d$ and a training batch $B_i$, we train the decoder with a cross-entropy loss (Fig. 2.b). Note that during training we freeze the encoder, and hence the latent space; a frozen latent space contributes to computational efficiency because the mapper $M$ need not be reconstructed.

Sampling The sampling stage is shown in Algorithm 1, lines 7-14. As described in Sec. 3.1, given the gradients w.r.t. the latent states of the current batch $B_i$ during training, the sampling process generates the adversarial samples $S_a$ and labels the high-uncertainty samples from a mixture of $S_a$ and randomly injected unlabeled data $S_r$. The newly labeled samples $Q$ are removed from the unlabeled text corpus and inserted into the labeled data, yielding $T_{i+1}$ and $D_{i+1}$ respectively. We then create a new training batch consisting of samples from $Q$ and $D_{i+1}$ at a ratio of $q : 1 - q$, which favors the newly selected data $Q$, since those samples are considered more critical to the current decision boundary.

Fine-Tuning The fine-tuning stage is shown in Algorithm 1, lines 15-18. We fine-tune the encoder for $k$ steps after every $j$ trained batches. During fine-tuning, both the encoder and the decoder are trained on the accumulated labeled data set $D_{i+1}$. Fine-tuning the encoder enhances overall performance; our experiments show that the final performance degrades considerably without updating the encoder. We then update the mapper $M$ for future KNN searches, because fine-tuning the encoder invalidates the projection from texts to the latent space, which requires renewing the sample mapper $M$. The algorithm terminates when the unlabeled text corpus $T_i$ is exhausted.

# 4 Experiments

We evaluate the AUSDS learning framework on sequence classification and sequence labeling tasks. For the oracle labeler $O$, we directly use the labels provided by the datasets. In all experiments, we average the results of 5 runs with different random seeds to alleviate the influence of randomness.

# 4.1 Set-up

Dataset. We use five datasets, namely the Stanford Sentiment Treebank (SST-2 / SST-5) (Socher et al., 2013), the Microsoft Research Paraphrase Corpus (MRPC) (Dolan et al., 2004), AG's News Corpus (AG News) (Zhang et al., 2015), and the CoNLL 2003 Named Entity Recognition dataset (CoNLL'03) (Sang and De Meulder, 2003). Their statistics can be found in Table 1. The train/development/test splits follow the original settings of those papers. We use accuracy as the evaluation metric for sequence classification and F1 score for sequence labeling.

<table><tr><td>Dataset</td><td>Task</td><td>Sample Size</td></tr><tr><td>SST-2 (Socher et al., 2013)</td><td>sequence classification</td><td>11.8k sentences, 215k phrases</td></tr><tr><td>SST-5 (Socher et al., 2013)</td><td>sequence classification</td><td>11.8k sentences, 215k phrases</td></tr><tr><td>MRPC (Dolan et al., 2004)</td><td>sequence classification</td><td>5,801 sentence pairs</td></tr><tr><td>AG News (Zhang et al., 2015)</td><td>sequence classification</td><td>12k sentences</td></tr><tr><td>CoNLL'03 (Sang and De Meulder, 2003)</td><td>sequence labeling</td><td>22k sentences, 300k tokens</td></tr></table>

Table 1: The 5 datasets used in our sentence learning experiments, spanning sequence classification and sequence labeling tasks.

<table><tr><td>Dataset</td><td>RM</td><td>US</td><td>AUSDS(FGV)</td><td>AUSDS(DeepFool)</td><td>AUSDS(C&W)</td></tr><tr><td>SST-2</td><td>1061x</td><td>1x</td><td>38x</td><td>38x</td><td>28x</td></tr><tr><td>SST-5</td><td>1939x</td><td>1x</td><td>52x</td><td>52x</td><td>38x</td></tr><tr><td>MRPC</td><td>97x</td><td>1x</td><td>14x</td><td>14x</td><td>11x</td></tr><tr><td>AG News</td><td>1434x</td><td>1x</td><td>51x</td><td>47x</td><td>38x</td></tr><tr><td>CoNLL'03</td><td>45x</td><td>1x</td><td>10x</td><td>—</td><td>—</td></tr></table>

Table 2: The average speedup of each sampling step relative to US on the 5 datasets with BERT as the encoder. The statistics are collected on a Tesla V100 GPU. US scans the unlabeled data once when $2\%$ of the data are labeled. AUSDS with DeepFool and C&W on CoNLL'03 is omitted because these adversarial attack methods are not suitable for sequence labeling.

Baseline Approaches. We compare our framework with two common baselines in NLP active learning: random sampling (RM) and entropy-based uncertainty sampling (US). For sequence classification tasks, we adopt the widely used Max Entropy (ME) (Berger et al., 1996) as the uncertainty measurement:

$$
H^{ME}(\mathbf{x}) = - \sum_{m=1}^{c} P(\mathbf{y} = m | \mathbf{x}) \log P(\mathbf{y} = m | \mathbf{x}) \tag{3}
$$

where $c$ is the number of classes. For sequence labeling tasks, we use the total token entropy (TTE) (Settles and Craven, 2008) as the uncertainty measurement:

$$
H^{TTE}(\mathbf{x}) = - \sum_{i=1}^{N} \sum_{m=1}^{l} P(\mathbf{y}_i = m | \mathbf{x}) \log P(\mathbf{y}_i = m | \mathbf{x}) \tag{4}
$$

where $N$ is the sequence length and $l$ is the number of labels.
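
Concretely, the two uncertainty measurements reduce to the following numpy sketch; the input probability arrays and function names are illustrative assumptions.

```python
# A minimal numpy sketch of the two uncertainty measurements (Eqs. 3-4),
# assuming `probs` and `token_probs` are model output probabilities.
import numpy as np

def max_entropy(probs):
    """ME for sequence classification; probs has shape (num_classes,)."""
    return -np.sum(probs * np.log(probs + 1e-12))

def total_token_entropy(token_probs):
    """TTE for sequence labeling; token_probs has shape (seq_len, num_labels)."""
    return -np.sum(token_probs * np.log(token_probs + 1e-12))
```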

Latent Space Definition. The adversarial attack in our AUSDS learning framework relies on a well-defined latent space to find informative samples. We define two types of latent spaces based on the encoder architecture and the task; a sketch of the second case follows the list below.

1. For pre-trained LMs like BERT (Devlin et al., 2018), which have an extra [CLS] token for sequence classification, we directly use its latent state $\mathbf{x}$ as the representation of the whole sentence in the latent space $\mathcal{H}$.
2. For the other circumstances, where no such special token is available, a mean-pooling operation is applied to the encoder output, i.e., $\mathbf{x} = \frac{1}{n}\sum_{t=1}^{n}h_t$, where $h_t$ denotes the contextual representation of the $t$-th token produced by the encoder. The latent space $\mathcal{H}$ is spanned by all the latent states.
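
A minimal PyTorch sketch of the mean-pooling latent state in case 2, with a padding mask so that only real tokens contribute; the tensor names are assumptions for illustration.

```python
# Mean pooling over per-token encoder outputs (case 2), assuming `hidden`
# holds contextual token representations and `mask` marks real tokens.
import torch

def mean_pool(hidden, mask):
    """hidden: (batch, seq_len, dim); mask: (batch, seq_len) with 1 for real tokens."""
    mask = mask.unsqueeze(-1).float()
    return (hidden * mask).sum(dim=1) / mask.sum(dim=1).clamp(min=1.0)
```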

Implementation Details. We implement our framework on top of the $\mathrm{BERT}_{\mathrm{BASE}}$ model and ELMo<sup>3</sup>. The configurations of the two models are the same as reported in (Devlin et al., 2018) and (Peters et al., 2018), respectively. The implementation of the KNN search is described in Sec. 3.1. For the remaining hyperparameters: 1) the batch size and the size of $Q$ are set to 32 (16 on the MRPC dataset); 2) the fine-tuning interval $j$ and the fine-tuning step count $k$ are both set to 50 steps; 3) the ratio $q$ is set to 0.3. All tuning experiments are performed on the dev sets of the five datasets. The accumulated labeled data set $D$ is initialized identically for the different approaches, taking $0.1\%$ of the whole unlabeled data ($0.5\%$ for MRPC because the dataset is relatively small).

<table><tr><td></td><td>Label Size</td><td>2%</td><td>4%</td><td>6%</td><td>8%</td><td>10%</td></tr><tr><td rowspan="5">SST-2</td><td>RM</td><td>87.78(.003)</td><td>89.85(.004)</td><td>89.85(.010)</td><td>89.69(.004)</td><td>90.26(.008)</td></tr><tr><td>US</td><td>87.74(.004)</td><td>90.25(.006)</td><td>90.38(.008)</td><td>90.25(.006)</td><td>91.27(.007)</td></tr><tr><td>AUSDS (FGV)</td><td>89.18(.002)</td><td>89.88(.008)</td><td>89.16(.014)</td><td>91.07(.005)</td><td>89.95(.003)</td></tr><tr><td>AUSDS (DeepFool)</td><td>88.74(.004)</td><td>90.06(.003)</td><td>89.84(.007)</td><td>90.74(.006)</td><td>91.58(.002)</td></tr><tr><td>AUSDS (C&W)</td><td>87.97(.003)</td><td>89.95(.005)</td><td>90.83(.007)</td><td>90.12(.003)</td><td>91.13(.001)</td></tr><tr><td rowspan="5">SST-5</td><td>RM</td><td>49.45(.010)</td><td>50.01(.007)</td><td>50.88(.006)</td><td>50.39(.014)</td><td>51.35(.005)</td></tr><tr><td>US</td><td>49.10(.008)</td><td>49.54(.009)</td><td>50.63(.008)</td><td>50.90(.012)</td><td>51.43(.005)</td></tr><tr><td>AUSDS (FGV)</td><td>49.57(.006)</td><td>50.36(.008)</td><td>50.09(.009)</td><td>50.19(.014)</td><td>50.62(.011)</td></tr><tr><td>AUSDS (DeepFool)</td><td>50.20(.012)</td><td>51.87(.003)</td><td>51.74(.012)</td><td>50.97(.012)</td><td>51.23(.007)</td></tr><tr><td>AUSDS (C&W)</td><td>48.28(.012)</td><td>48.78(.014)</td><td>51.58(.007)</td><td>51.40(.010)</td><td>47.42(.006)</td></tr><tr><td rowspan="5">MRPC</td><td>RM</td><td>67.33(.008)</td><td>68.31(.006)</td><td>68.56(.018)</td><td>70.06(.021)</td><td>71.15(.020)</td></tr><tr><td>US</td><td>62.14(.090)</td><td>69.34(.005)</td><td>69.11(.010)</td><td>70.53(.017)</td><td>71.49(.016)</td></tr><tr><td>AUSDS (FGV)</td><td>68.89(.014)</td><td>69.30(.023)</td><td>70.28(.015)</td><td>70.06(.012)</td><td>69.30(.019)</td></tr><tr><td>AUSDS (DeepFool)</td><td>67.92(.009)</td><td>68.88(.017)</td><td>69.68(.017)</td><td>71.69(.014)</td><td>71.55(.012)</td></tr><tr><td>AUSDS (C&W)</td><td>67.91(.014)</td><td>68.53(.017)</td><td>70.46(.012)</td><td>70.49(.012)</td><td>68.89(.016)</td></tr><tr><td rowspan="5">AG News</td><td>RM</td><td>89.89(.003)</td><td>90.89(.002)</td><td>91.37(.002)</td><td>91.79(.002)</td><td>92.21(.002)</td></tr><tr><td>US</td><td>90.29(.006)</td><td>91.59(.007)</td><td>92.34(.003)</td><td>92.71(.001)</td><td>93.01(.001)</td></tr><tr><td>AUSDS (FGV)</td><td>90.75(.002)</td><td>91.55(.002)</td><td>92.26(.003)</td><td>92.62(.001)</td><td>93.16(.001)</td></tr><tr><td>AUSDS (DeepFool)</td><td>90.67(.004)</td><td>91.65(.004)</td><td>92.43(.004)</td><td>92.66(.004)</td><td>93.12(.002)</td></tr><tr><td>AUSDS (C&W)</td><td>90.24(.002)</td><td>91.29(.002)</td><td>92.30(.004)</td><td>92.90(.002)</td><td>93.10(.003)</td></tr><tr><td rowspan="5">CoNLL'03</td><td>RM</td><td>80.42(.002)</td><td>83.38(.002)</td><td>85.39(.005)</td><td>86.78(.005)</td><td>87.42(.003)</td></tr><tr><td>US</td><td>78.12(.002)</td><td>81.49(.019)</td><td>84.45(.004)</td><td>86.73(.008)</td><td>87.79(.004)</td></tr><tr><td>AUSDS (FGV)</td><td>80.65(.006)</td><td>83.60(.003)</td><td>85.98(.010)</td><td>87.10(.004)</td><td>87.83(.003)</td></tr><tr><td>AUSDS (DeepFool)</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td></tr><tr><td>AUSDS (C&W)</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td></tr></table>

Table 3: Convergence results w.r.t. label size in the training-from-scratch setting with BERT as the encoder. The label size denotes the ratio of labeled data. The numbers are averaged over 5 runs on the test set. The best results for each label size are marked in bold. The sequence classification and sequence labeling tasks are evaluated with accuracy and F1 score, respectively. AUSDS with DeepFool and C&W on CoNLL'03 is omitted because these adversarial attack methods are not suitable for sequence labeling.

<table><tr><td>Label Size</td><td>2%</td><td>4%</td><td>6%</td><td>8%</td><td>10%</td></tr><tr><td>RM</td><td>81.58(.004)</td><td>82.90(.006)</td><td>83.53(.008)</td><td>82.15(.016)</td><td>84.40(.006)</td></tr><tr><td>US</td><td>78.23(.007)</td><td>80.34(.003)</td><td>81.99(.006)</td><td>82.34(.008)</td><td>82.21(.004)</td></tr><tr><td>AUSDS (FGV)</td><td>81.22(.004)</td><td>83.25(.001)</td><td>84.18(.005)</td><td>84.49(.004)</td><td>84.62(.009)</td></tr><tr><td>AUSDS (DeepFool)</td><td>82.37(.003)</td><td>83.31(.004)</td><td>83.77(.002)</td><td>84.68(.001)</td><td>84.73(.005)</td></tr><tr><td>AUSDS (C&W)</td><td>81.27(.006)</td><td>84.02(.007)</td><td>82.76(.002)</td><td>84.40(.002)</td><td>83.58(.012)</td></tr></table>

Table 4: Convergence results w.r.t. label size in the training-from-scratch setting with ELMo as the encoder on SST-2. The label size denotes the ratio of labeled data. The best results for each label size are marked in bold.

# 4.2 Sampling Effectiveness

AUSDS achieves higher sampling effectiveness than uncertainty sampling, which suffers from the sampling bias problem. The main criterion for evaluating an active learning approach is sampling effectiveness, namely the model performance with a limited amount of unlabeled data being sampled and labeled. We compare our AUSDS learning framework with the two baselines using the same amount of labeled data, with limits set to $2\%$, $4\%$, $6\%$, $8\%$, and $10\%$ of all labeled data in each dataset. We label at most $10\%$ of the whole training data because active learning focuses on training with a rather limited amount of labeled data by selecting the most valuable examples to label; with enough labeled data available, it makes little difference whether active learning is performed or not. We believe that with less labeled data, the gap in performance, i.e., in sampling effectiveness, is more pronounced.

We propose a training-from-scratch setting to better evaluate sampling effectiveness, in which models are trained from scratch using the labeled data sampled by the different approaches at various labeled data sizes. We argue that simply training the model until convergence after each sampling step, which we call the continuous training setting, can easily induce the problem of sampling bias (Huang et al., 2010). Biased models in the early training phase lead to worse performance even after more informative samples are given, so the performance of models during sampling cannot reflect the real informativeness of the selected samples.

(a) Margin during Training

(b) Margin Distribution

Figure 3: The margin of outputs on samples selected by different sampling strategies on SST-5. The margin denotes the difference between the largest and second-largest output probabilities across classes. The lower the margin, the closer the sample lies to the decision boundary. Fig. (a) shows the average margin of each sampling step during training; the margins of samples selected by RM and US on the whole unlabeled data are also plotted as references. Fig. (b) shows the margin distribution of samples selected from sampling step 800 to 1000, where the average uncertainty becomes steady. US is omitted from Fig. (b) for better visualization.

The training-from-scratch results are shown in Table 3. Our framework consistently outperforms the random baseline because it selects samples that are more informative for identifying the shape of the decision boundary. It also outperforms common uncertainty sampling in most cases under the same labeled data size limits, because the frequent sampling in our approach alleviates the sampling bias issue. Uncertainty sampling suffers from sampling bias because the decision boundary varies frequently in the early phase of training, which results in ineffective sampling: the boundary is determined by only a small number of labeled examples early on, and an easily biased boundary can lead to samples that have high uncertainty under the current model state but are not representative of the whole unlabeled data. From the overall results on the five standard benchmarks across 2 NLP tasks, we observe that AUSDS achieves better sampling effectiveness with DeepFool for sequence classification and FGV for sequence labeling. The results of C&W are included for completeness and comparison.

To show that our AUSDS framework does not depend heavily on BERT, we conduct experiments on SST-2 with ELMo as the encoder, which has a different network structure. The results in Table 4 show that in this setting our AUSDS framework still achieves higher sampling effectiveness, while the original uncertainty sampling gets stuck in an even more severe sampling bias problem. These results also provide evidence of the generalization ability of our framework to other pre-trained LM encoding spaces.

# 4.3 Computational Efficiency

AUSDS is computationally more efficient than uncertainty sampling; it is efficient enough to be performed at the batch level, thus achieving real-time effective sampling. The average sampling speeds of the different approaches, relative to US, are compared in Table 2.

We observe that uncertainty sampling can hardly work in a real-time sampling setting because of its costly sampling process. Our AUSDS is more than 10x faster than common uncertainty sampling, and the larger the unlabeled data pool, the more significant the acceleration. Our framework spends more computation time than the random sampling baseline, but is still fast enough for real-time batch-level sampling. Moreover, the sampling effectiveness results in Sec. 4.2 show that the extra computation for adversarial samples is worthwhile, given the clear performance gains on the same amount of labeled data.

# 4.4 Sample Uncertainty

AUSDS does select examples with higher uncertainty. We plot the output margins of samples selected by the different sampling strategies on SST-5 in Fig. 3, using the margin as a measure of distance to the decision boundary; lower margins indicate positions closer to the boundary. As shown in Fig. 3(a), the samples selected by AUSDS with the different attack approaches achieve lower average margins during sampling. Samples from step 800 to 1000 are collected to estimate the margin distribution, shown in Fig. 3(b): AUSDS is better at capturing samples with high uncertainty, as its margin distributions lie further to the left. Uncertainty sampling performed on the whole unlabeled data obtains the most uncertain samples, but it is very time-consuming and cannot be applied frequently.
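
The margin used in Fig. 3 is simply the gap between the two largest class probabilities, as in this sketch (names illustrative):

```python
# A minimal sketch of the margin measurement used in Fig. 3, assuming
# `probs` are the model's class probabilities for one sample.
import numpy as np

def margin(probs):
    """Difference between the largest and second-largest class probabilities."""
    top2 = np.sort(probs)[-2:]
    return top2[1] - top2[0]
```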

In short, AUSDS achieves better sampling effectiveness than US because its more efficient batch-level sampling alleviates the problem of sampling bias. Adversarial attacks can be an effective way to find critical data points near the decision boundary.

# 5 Conclusion

Uncertainty sampling is an effective way of reducing the labeled data size in sentence learning, but its high latency leads to an ineffective sampling problem. In this study, we propose adversarial uncertainty sampling in discrete space for active sentence learning to address this problem. The proposed AUSDS is more efficient than traditional uncertainty sampling, leveraging adversarial attacks and projecting discrete sentences into a pre-trained LM space. Experimental results on five datasets show that the proposed approach outperforms strong baselines in most cases and achieves better sampling effectiveness.

# Acknowledgments

The corresponding author is Yong Yu. The SJTU team is supported by the "New Generation of AI 2030" Major Project 2018AAA0100900 and NSFC (61702327, 61772333, 61632017, 81771937). We thank Rong Ye, Huadong Chen, Xunpeng Huang, and the anonymous reviewers for their insightful and detailed comments.

# References

Naomi S Altman. 1992. An introduction to kernel and nearest-neighbor nonparametric regression. The American Statistician, 46(3):175-185.

Adam L Berger, Vincent J Della Pietra, and Stephen A Della Pietra. 1996. A maximum entropy approach to natural language processing. Computational Linguistics, 22(1):39-71.

Battista Biggio, Igino Corona, Davide Maiorca, Blaine Nelson, Nedim Šrndić, Pavel Laskov, Giorgio Giacinto, and Fabio Roli. 2013. Evasion attacks against machine learning at test time. In Joint European Conference on Machine Learning and Knowledge Discovery in Databases, pages 387-402. Springer.

Nicholas Carlini and David Wagner. 2017. Towards evaluating the robustness of neural networks. In 2017 IEEE Symposium on Security and Privacy (SP), pages 39-57. IEEE.

Yue Deng, KaWai Chen, Yilin Shen, and Hongxia Jin. 2018. Adversarial active learning for sequences labeling and generation. In IJCAI, pages 4012-4018.

Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. BERT: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805.

Bill Dolan, Chris Quirk, and Chris Brockett. 2004. Unsupervised construction of large paraphrase corpora: Exploiting massively parallel news sources. In Proceedings of the 20th International Conference on Computational Linguistics, page 350. Association for Computational Linguistics.

Melanie Ducoffe and Frederic Precioso. 2018. Adversarial active learning for deep networks: a margin based approach. arXiv preprint arXiv:1802.09841.

Ian J Goodfellow, Jonathon Shlens, and Christian Szegedy. 2014. Explaining and harnessing adversarial examples. arXiv preprint arXiv:1412.6572.

Steven CH Hoi, Rong Jin, and Michael R Lyu. 2006. Large-scale text categorization by batch mode active learning. In Proceedings of the 15th International Conference on World Wide Web, pages 633-642. ACM.

Sheng-Jun Huang, Rong Jin, and Zhi-Hua Zhou. 2010. Active learning by querying informative and representative examples. In Advances in Neural Information Processing Systems, pages 892-900.

Prateek Jain, Sudheendra Vijayanarasimhan, and Kristen Grauman. 2010. Hashing hyperplane queries to near points with applications to large-scale active learning. In Advances in Neural Information Processing Systems, pages 928-936.

Jeff Johnson, Matthijs Douze, and Hervé Jégou. 2017. Billion-scale similarity search with GPUs. arXiv preprint arXiv:1702.08734.

Ajay J Joshi, Fatih Porikli, and Nikolaos Papanikolopoulos. 2009. Multi-class active learning for image classification. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 2372-2379. IEEE.

Alexey Kurakin, Ian Goodfellow, and Samy Bengio. 2016. Adversarial examples in the physical world. arXiv preprint arXiv:1607.02533.

David D Lewis and William A Gale. 1994. A sequential algorithm for training text classifiers. In SIGIR '94, pages 3-12. Springer.

Pengcheng Li, Jinfeng Yi, and Lijun Zhang. 2018. Query-efficient black-box attack by active learning. arXiv preprint arXiv:1809.04913.

Seyed-Mohsen Moosavi-Dezfooli, Alhussein Fawzi, and Pascal Frossard. 2016. DeepFool: A simple and accurate method to fool deep neural networks. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR).

Soham Pal, Yash Gupta, Aditya Shukla, Aditya Kanade, Shirish Shevade, and Vinod Ganapathy. 2020. ActiveThief: Model extraction using active learning and unannotated public data. In AAAI.

Matthew E Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word representations. arXiv preprint arXiv:1802.05365.

Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. 2018. Improving language understanding by generative pre-training. URL: https://s3-us-west-2.amazonaws.com/openai-assets/research-covers/language-unsupervised/language_understanding_paper.pdf.

Andras Rozsa, Ethan M Rudd, and Terrance E Boult. 2016. Adversarial diversity and hard positive generation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pages 25-32.

Erik F Tjong Kim Sang and Fien De Meulder. 2003. Introduction to the CoNLL-2003 shared task: Language-independent named entity recognition. In Proceedings of the Seventh Conference on Natural Language Learning at HLT-NAACL 2003.

Burr Settles. 2009. Active learning literature survey. Technical report, University of Wisconsin-Madison Department of Computer Sciences.

Burr Settles and Mark Craven. 2008. An analysis of active learning strategies for sequence labeling tasks. In Proceedings of the Conference on Empirical Methods in Natural Language Processing, pages 1070-1079. Association for Computational Linguistics.

Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D Manning, Andrew Ng, and Christopher Potts. 2013. Recursive deep models for semantic compositionality over a sentiment treebank. In Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing, pages 1631-1642.

Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan Bruna, Dumitru Erhan, Ian Goodfellow, and Rob Fergus. 2013. Intriguing properties of neural networks. arXiv preprint arXiv:1312.6199.

Xiang Zhang, Junbo Zhao, and Yann LeCun. 2015. Character-level convolutional networks for text classification. In Advances in Neural Information Processing Systems, pages 649-657.

Jia-Jie Zhu and José Bento. 2017. Generative adversarial active learning. arXiv preprint arXiv:1702.07956.

activesentencelearningbyadversarialuncertaintysamplingindiscretespace/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:563b481a9045262948460dc0c8cb3faefbbd20a20adab78d23356bdccbc626a5
size 413408
activesentencelearningbyadversarialuncertaintysamplingindiscretespace/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:64d7ed2a36d0a1250f8af58a1cde79c3e5bd5947a706245a51b080cb8121e609
size 354808
activetestinganunbiasedevaluationmethodfordistantlysupervisedrelationextraction/7d6b49a6-e5eb-4a5c-9201-b68696e59c6c_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:252c8038e7209b1288438f905422bcb98ea59477bab84193f073631e50c2a141
size 58273
activetestinganunbiasedevaluationmethodfordistantlysupervisedrelationextraction/7d6b49a6-e5eb-4a5c-9201-b68696e59c6c_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:93c36428c0f128c0a14f1a64436eacc19904d21336f3b93a112ae623d69164b6
size 69486
activetestinganunbiasedevaluationmethodfordistantlysupervisedrelationextraction/7d6b49a6-e5eb-4a5c-9201-b68696e59c6c_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0501f502f6b4c9089396856adcdefed58458cebc07e2d755129a03aaa622b1bb
size 2822869
activetestinganunbiasedevaluationmethodfordistantlysupervisedrelationextraction/full.md
ADDED
@@ -0,0 +1,281 @@

# Active Testing: An Unbiased Evaluation Method for Distantly Supervised Relation Extraction

Pengshuai Li$^{1}$, Xinsong Zhang$^{2}$, Weijia Jia$^{3,1*}$ and Wei Zhao$^{4}$

$^{1}$ Dept. of CSE, Shanghai Jiao Tong University, Shanghai, China; $^{2}$ ByteDance AI Lab; $^{3}$ Institute of AI & Future Networks, Beijing Normal University (Zhuhai) & UIC, PR China; $^{4}$ American University of Sharjah, Sharjah, United Arab Emirates. pengshuai.li@sjtu.edu.cn, zhangxinsong.0320@bytedance.com, jiawj@bnu.edu.cn, wzhao@aus.edu

# Abstract

Distant supervision has been a widely used method for neural relation extraction because of the convenience of automatically labeling datasets. However, existing works on distantly supervised relation extraction suffer from the low quality of the test set, which leads to considerably biased performance evaluation. These biases not only result in unfair evaluations but also mislead the optimization of neural relation extractors. To mitigate this problem, we propose a novel evaluation method named active testing, which utilizes both the noisy test set and a few manual annotations. Experiments on a widely used benchmark show that our proposed approach can yield approximately unbiased evaluations for distantly supervised relation extractors.

# 1 Introduction

Relation extraction aims to identify relations between a pair of entities in a sentence. It has been thoroughly researched with supervised methods on hand-labeled data. To break the bottleneck of manual labeling, distant supervision (Mintz et al., 2009) automatically labels raw text with knowledge bases. It assumes that if a pair of entities have a known relation in a knowledge base, all sentences containing these two entities may express the same relation. Clearly, the automatically labeled datasets in distant supervision contain many sentences with wrong relation labels. However, previous works only focus on wrongly labeled instances in training sets and neglect those in test sets. Most of them estimate performance with a held-out evaluation on noisy test sets, which yields inaccurate evaluations of existing models and seriously misleads model optimization. As shown in Table 1, we compare the results of held-out evaluation and human evaluation for the same model on the widely used benchmark dataset NYT-10 (Riedel et al., 2010). The biases between human evaluation and the existing held-out evaluation are over $10\%$, and are mainly caused by wrongly labeled instances in the test set, especially false negative instances.

<table><tr><td>Evaluations</td><td>P@100</td><td>P@200</td><td>P@300</td></tr><tr><td>Held-out Evaluation</td><td>83</td><td>77</td><td>69</td></tr><tr><td>Human Evaluation</td><td>93(+10)</td><td>92.5(+15.5)</td><td>91(+22)</td></tr></table>

Table 1: Precision at top K predictions $(\%)$ of the model of Lin et al. (2016) under held-out evaluation and human evaluation on NYT-10. Results are obtained by our implementations.

A false negative instance is an entity pair labeled as non-relation even though it has at least one relation in reality. This problem is caused by the incompleteness of existing knowledge bases. For example, over $70\%$ of the people included in Freebase have no place of birth (Dong et al., 2014). From a random sample, we deduce that about $8.75\%$ of the entity pairs in the test set of NYT-10 are misclassified as non-relation.<sup>1</sup> Clearly, these mislabeled entity pairs yield biased evaluations and lead to inappropriate optimization of distantly supervised relation extraction.

In this paper, we propose an active testing approach to estimate the performance of distantly supervised relation extraction. Active testing has been proved effective in evaluating vision models with large-scale noisy datasets (Nguyen et al., 2018). In our approach, we design an iterative procedure with two stages per iteration: a vetting stage and an estimating stage. In the vetting stage, we adopt an active strategy to select batches of the most valuable entity pairs from the noisy test set for annotation. In the estimating stage, a metric estimator is proposed to obtain a more accurate evaluation.

With a few vetting-estimating iterations, evaluation results can come dramatically close to those of human evaluation by using limited vetted data together with all the noisy data. Experimental results demonstrate that the proposed evaluation method yields approximately unbiased estimations for distantly supervised relation extraction.

# 2 Related Work

Distant supervision (Mintz et al., 2009) was proposed to handle large-scale relation extraction with automatic annotations. A series of studies have been conducted with human-designed features in distantly supervised relation extraction (Riedel et al., 2010; Surdeanu et al., 2012; Takamatsu et al., 2012; Angeli et al., 2014; Han and Sun, 2016). In recent years, neural models have been widely used to extract semantic meaning accurately without hand-designed features (Zeng et al., 2015; Lin et al., 2017; Zhang et al., 2019). To alleviate the influence of wrongly labeled instances in distant supervision, these neural relation extractors integrated techniques such as attention mechanisms (Lin et al., 2016; Han et al., 2018; Huang and Du, 2019), generative adversarial nets (Qin et al., 2018a; Li et al., 2019), and reinforcement learning (Feng et al., 2018; Qin et al., 2018b). However, none of the above methods pay attention to the biased and inaccurate test set. Though human evaluation can yield accurate evaluation results (Zeng et al., 2015; Alt et al., 2019), labeling all the instances in the test set is too costly.

# 3 Task Definition

In the distant supervision paradigm, all sentences containing the same entity pair constitute a bag. Researchers train a relation extractor on bags of sentences and then use it to predict relations of entity pairs. Suppose a distantly supervised model returns confidence scores<sup>2</sup> $s_i = \{s_{i1}, s_{i2} \ldots s_{ip}\}$ for entity pair $i \in \{1 \ldots N\}$, where $p$ is the number of relations, $N$ is the number of entity pairs, and $s_{ij} \in (0, 1)$. $y_i = \{y_{i1}, y_{i2} \ldots y_{ip}\}$ and $z_i = \{z_{i1}, z_{i2} \ldots z_{ip}\}$ respectively denote the automatic labels and true labels for entity pair $i$, where $y_{ij}$ and $z_{ij}$ are both in $\{0, 1\}$.<sup>3</sup>

In the widely used held-out evaluation, existing methods observe two key metrics: precision at top K $(P@K)$ and the Precision-Recall curve ($PR$ curve). To compute both metrics, the confidence scores of all entity pairs are sorted in descending order, denoted $s' = \{s_1', s_2' \ldots s_P'\}$ where $P = Np$. Automatic labels and true labels are denoted $y' = \{y_1', \ldots, y_P'\}$ and $z' = \{z_1', \ldots, z_P'\}$. In summary, $P@K$ and $R@K$ are given by the following equations,

$$
P@K\{z_1' \ldots z_P'\} = \frac{1}{K} \sum_{i \leq K} z_i' \tag{1}
$$

$$
R@K\{z_1' \ldots z_P'\} = \frac{\sum_{i \leq K} z_i'}{\sum_{i \leq P} z_i'} \tag{2}
$$

Held-out evaluation replaces $z'$ with $y'$ to calculate $P@K$ and $R@K$, which obviously leads to incorrect results.
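
For concreteness, Eqs. 1-2 amount to the following numpy sketch; the array names are illustrative assumptions.

```python
# A minimal numpy sketch of P@K and R@K (Eqs. 1-2), assuming `scores` and
# `labels` are aligned 1-D arrays over all (entity pair, relation) items.
import numpy as np

def precision_recall_at_k(scores, labels, k):
    order = np.argsort(-scores)          # sort confidence scores in descending order
    topk = labels[order][:k]
    return topk.mean(), topk.sum() / labels.sum()
```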

# 4 Methodology

In this section, we present the general framework of our method. A small randomly sampled set is vetted in the initial state. Each iteration then has two steps: 1) select a batch of entity pairs with a customized vetting strategy, label them manually, and add them to the vetted set; 2) use a new metric estimator to evaluate existing models with the noisy set and the vetted set jointly. After a few vetting-estimating iterations, the unbiased performance of relation extraction is appropriately estimated. In summary, our method consists of two key components: a vetting strategy and a metric estimator.

# 4.1 Metric Estimator

Our test set consists of two parts: 1) a noisy set $U$, for which we only know the automatic labels $y_i'$; and 2) a vetted set $V$, for which we know both the automatic labels $y_i'$ and the manual labels $\tilde{z}_i'$. We treat the true label $z_i'$ as a latent variable of which $\tilde{z}_i'$ is the observed value. The performance evaluation mainly depends on the estimation of $z_i'$. In our work, we estimate the probability as

$$
p(z') = \prod_{i \in U} p(z_i' | \Theta) \prod_{i \in V} \delta(z_i' = \tilde{z}_i') \tag{3}
$$

where $\Theta$ represents all available information, such as confidence scores and noisy labels. We make the assumption that the distribution of the true latent labels is conditioned on $\Theta$.

Given the posterior estimates $p(z_i'|\Theta)$, we can compute the expected performance by replacing each true latent label with its probability. The precision and recall equations can then be rewritten as

$$
E[P@K] = \frac{1}{K} \left( \sum_{i \in V_K} \tilde{z}_i' + \sum_{i \in U_K} p(z_i' = 1 | \Theta) \right) \tag{4}
$$

$$
E[R@K] = \frac{\sum_{i \in V_K} \tilde{z}_i' + \sum_{i \in U_K} p(z_i' = 1 | \Theta)}{\sum_{i \in V} \tilde{z}_i' + \sum_{i \in U} p(z_i' = 1 | \Theta)} \tag{5}
$$

where $U_K$ and $V_K$ denote the unvetted and vetted subsets of the $K$ highest-scoring examples in the total set $U \cup V$.
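
The expected-precision estimator of Eq. 4 mixes manual labels on the vetted subset with posteriors on the unvetted subset, as in this sketch; the aligned-array layout is an assumption for illustration.

```python
# A minimal sketch of the expected P@K estimator (Eq. 4), assuming
# `is_vetted`, `vetted_labels`, and `posterior` (p(z'=1|Θ)) are aligned
# 1-D arrays already sorted by descending confidence score.
import numpy as np

def expected_precision_at_k(is_vetted, vetted_labels, posterior, k):
    contrib = np.where(is_vetted[:k], vetted_labels[:k], posterior[:k])
    return contrib.mean()  # vetted items use manual labels, unvetted use posteriors
```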

To predict the true latent label $z_i'$ for a specific relation, we use the noisy label $y_i'$ and the confidence score $s_i'$. This posterior probability can be derived as (see the appendix for the proof)

$$
p(z_i' \mid y_i', s_i') = \frac{p(y_{jk} \mid z_{jk}) \, p(z_{jk} \mid s_{jk})}{\sum_{v} p(y_{jk} \mid z_{jk} = v) \, p(z_{jk} = v \mid s_{jk})} \tag{6}
$$

where $v \in \{0,1\}$, and $s_{jk}, y_{jk}, z_{jk}$ are the elements of $s_i', y_i', z_i'$ corresponding to positions before sorting the confidence scores. Given a few vetted data, we fit $p(y_{jk} | z_{jk})$ by standard maximum likelihood estimation (counting frequencies), and $p(z_{jk} | s_{jk})$ by logistic regression, with a separate logistic regression function fitted for each relation.
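
A minimal sketch of fitting the two factors of Eq. 6 on the vetted data, using frequency counting and scikit-learn's logistic regression; the per-relation array names are illustrative assumptions.

```python
# Fitting the factors of Eq. 6 on the vetted set, assuming `s_vet`, `y_vet`,
# `z_vet` are per-relation arrays of confidence scores, noisy labels, and
# manual labels for vetted items.
import numpy as np
from sklearn.linear_model import LogisticRegression

def fit_posterior_factors(s_vet, y_vet, z_vet):
    # p(y=1 | z): maximum likelihood by counting frequencies on vetted data
    p_y_given_z = {z: y_vet[z_vet == z].mean() for z in (0, 1)}
    # p(z=1 | s): one logistic regression per relation
    lr = LogisticRegression().fit(s_vet.reshape(-1, 1), z_vet)
    return p_y_given_z, lr
```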

# 4.2 Vetting Strategy

In this work, we apply a strategy based on maximum expected model change (MEMC) (Settles, 2009): select the sample that yields the largest expected change of the performance estimate. Let $E_{p(z'|V)}Q$ be the expected performance based on the distribution $p(z'|V)$ estimated from the current vetted set $V$. After vetting example $i$ and updating the estimator, it becomes $E_{p(z'|V,z_i')}Q$. The change caused by vetting example $i$ can be written as

$$
\Delta_i(z_i') = \left| E_{p(z'|V)} Q - E_{p(z'|V, z_i')} Q \right| \tag{7}
$$

For precision at top K, this expected change can be written as

$$
E_{p(z_i'|V)} \left[ \Delta_i(z_i') \right] = \frac{2}{K} p_i (1 - p_i) \tag{8}
$$

where $p_i = P(z_i' = 1|\Theta)$. For the PR curve, every point depends on $P@K$ for some $K$, so this vetting strategy is also useful for the PR curve.
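
Since $\frac{2}{K} p_i(1 - p_i)$ is maximized when $p_i$ is closest to 0.5, the MEMC selection reduces to picking the unvetted items with the most uncertain posteriors, as in this sketch (names illustrative):

```python
# A minimal sketch of the MEMC vetting step (Eq. 8): pick the unvetted items
# whose posteriors p(z'=1|Θ) are closest to 0.5, i.e., largest p*(1-p).
import numpy as np

def select_vetting_batch(posterior, unvetted_ids, batch_size=20):
    gain = posterior[unvetted_ids] * (1.0 - posterior[unvetted_ids])
    return unvetted_ids[np.argsort(-gain)[:batch_size]]
```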

With this vetting strategy, the most valuable data are always selected first. Therefore, the vetting budget is the only factor controlling the vetting procedure; we take it as a hyperparameter. When the budget is used up, the vetting stops. The procedure is described in Algorithm 1.

# Algorithm 1 Active Testing Algorithm

Require: unvetted set $U$, vetted set $V$, vetting budget $T$, vetting strategy $VS$, confidence scores $S$, estimator $p(z')$

1: while $T > 0$ do
2:     select a batch of items $B \subset U$ with vetting strategy $VS$
3:     vet $B$ and get manual labels $\tilde{z}'$
4:     $U = U - B$, $V = V \cup B$
5:     fit $p(z')$ with $U, V, S$
6:     $T = T - |B|$
7: end while

# 5 Experiment

We conduct experiments to support two claims: 1) the proposed active testing obtains more accurate results by introducing very few manual annotations; 2) held-out evaluation misdirects the optimization of relation extraction, which is further demonstrated through the re-evaluation of eight up-to-date relation extractors.

# 5.1 Experimental Setting

Dataset. Our experiments are conducted on the widely used benchmark NYT-10 (Riedel et al., 2010) and an accurately labeled dataset named NYT-19, which contains 500 randomly selected entity pairs from the test set of NYT-10: 106 positive entity pairs and 394 negative entity pairs, of which 35 are false negatives. NYT-19 has been carefully labeled by NLP researchers.

Initialization. We use PCNN+ATT (Lin et al., 2016) as the baseline relation extractor. To be more convincing, we provide the experimental results of BGRU+ATT in the appendix. The initial vetted set includes all the positive entity pairs of the NYT-10 test set and 150 vetted negative entity pairs. The batch size for vetting is 20 and the vetting budget is set to 100 entity pairs.

# 5.2 Effect of Active Testing

We evaluate the performance of PCNN+ATT under held-out evaluation, human evaluation, and our method. The results are shown in Table 2 and Figure 1. Due to the high cost of manually labeling the whole test set, we use the PR curve on NYT-19 to approximate that on NYT-10.

<table><tr><td>Model</td><td>Evaluations</td><td>P@100</td><td>P@200</td><td>P@300</td></tr><tr><td rowspan="3">PCNN+ATT</td><td>Held-out Evaluation</td><td>83</td><td>77</td><td>69</td></tr><tr><td>Our method</td><td>91.2</td><td>88.4</td><td>83.4</td></tr><tr><td>Human Evaluation</td><td>93</td><td>92.5</td><td>91</td></tr></table>

Table 2: Precision at top K predictions $(\%)$ of PCNN+ATT under held-out evaluation, our method, and human evaluation on NYT-10.

Figure 1: The PR curve of PCNN+ATT on NYT-19.

To measure the distance between two curves, we sample 20 equidistant points on each curve and compute the Euclidean distance between the two resulting vectors. In this way, our method has a distance of 0.17 to the human evaluation curve, while the corresponding distance for held-out evaluation is 0.72. We observe that 1) the performance biases between manual evaluation and held-out evaluation are too significant to be neglected, and 2) the huge biases caused by wrongly labeled instances are dramatically alleviated by our method, which obtains precision at least $8.2\%$ closer to manual evaluation than the held-out evaluation.
# 5.3 Effect of Vetting Strategy
We compare our MEMC strategy with a random vetting strategy, as shown in Figure 2. The distances from the curves of the MEMC and random strategies to that of human evaluation are 0.176 and 0.284, respectively. From the figure, we conclude that the proposed vetting strategy is considerably more effective than random vetting: with the same vetting budget, MEMC yields a more accurate performance estimate over most of the range.
# 5.4 Re-evaluation of Relation Extractors
With the proposed performance estimator, we re-evaluate eight up-to-date distantly supervised relation extractors.

Figure 2: The PR curves of PCNN+ATT evaluated with various vetting strategies on NYT-19.

| Model | P@100 (%) | P@200 (%) | P@300 (%) |
|---|---|---|---|
| Zeng et al. 2015 | 88.0 | 85.1 | 82.3 |
| Lin et al. 2016 | 91.2 | 88.9 | 83.8 |
| Liu et al. 2017 | 94.0 | 89.0 | 87.0 |
| Qin et al. 2018b | 88.8 | 86.2 | 84.8 |
| Qin et al. 2018a | 87.0 | 83.8 | 80.8 |
| Liu et al. 2018 | 95.7 | 93.4 | 89.9 |
| BGRU | 94.4 | 89.5 | 84.7 |
| BGRU+ATT | 95.1 | 90.1 | 87.1 |
Table 3: The P@N precision of distantly supervised relation extractors on NYT-10. All methods are implemented within the same framework and run in the same runtime environment.
From Table 3, we can observe that: 1) The relative ranking of the models by precision at top $K$ remains almost unchanged, except for Qin et al. 2018b and Qin et al. 2018a; although GANs and reinforcement learning help select valuable training instances, they tend to overfit. 2) Most models achieve the improvements reported in their papers in the high-confidence-score interval. 3) BGRU-based models perform better than the others, and the BGRU-based method of Liu et al. 2018 achieves the highest precision. More results and discussion can be found in the Appendix.
# 6 Conclusion
In this paper, we propose a novel active testing approach for distantly supervised relation extraction, which evaluates the performance of relation extractors with both noisy data and a small amount of vetted data. Our experiments show that the proposed evaluation method is largely unbiased and will be valuable for guiding future optimization of distantly supervised relation extraction.
# Acknowledgements
This work is partially supported by Chinese National Research Fund (NSFC) Key Projects No. 61532013 and No. 61872239, and by the BNU-UIC Institute of Artificial Intelligence and Future Networks funded by Beijing Normal University (Zhuhai) and the AI and Data Science Hub, BNU-HKBU United International College (UIC), Zhuhai, Guangdong, China.
# References
Christoph Alt, Marc Hübner, and Leonhard Hennig. 2019. Fine-tuning pre-trained transformer language models to distantly supervised relation extraction. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 1388-1398.
Gabor Angeli, Julie Tibshirani, Jean Wu, and Christopher D Manning. 2014. Combining distant and partial supervision for relation extraction. In Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP), pages 1556-1567.
Xin Dong, Evgeniy Gabrilovich, Geremy Heitz, Wilko Horn, Ni Lao, Kevin Murphy, Thomas Strohmann, Shaohua Sun, and Wei Zhang. 2014. Knowledge vault: A web-scale approach to probabilistic knowledge fusion. In Proceedings of the 20th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD), pages 601-610.
Jun Feng, Minlie Huang, Li Zhao, Yang Yang, and Xiaoyan Zhu. 2018. Reinforcement learning for relation classification from noisy data. In Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence (AAAI), pages 5779-5786.
Xianpei Han and Le Sun. 2016. Global distant supervision for relation extraction. In Proceedings of the Thirtieth AAAI Conference on Artificial Intelligence (AAAI), pages 2950-2956.
Xu Han, Pengfei Yu, Zhiyuan Liu, Maosong Sun, and Peng Li. 2018. Hierarchical relation extraction with coarse-to-fine grained attention. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2236-2245.
Yuyun Huang and Jinhua Du. 2019. Self-attention enhanced CNNs and collaborative curriculum learning for distantly supervised relation extraction. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 389-398.
Pengshuai Li, Xinsong Zhang, Weijia Jia, and Hai Zhao. 2019. GAN driven semi-distant supervision for relation extraction. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 3026-3035.
Yankai Lin, Zhiyuan Liu, and Maosong Sun. 2017. Neural relation extraction with multi-lingual attention. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (ACL), pages 34-43.
Yankai Lin, Shiqi Shen, Zhiyuan Liu, Huanbo Luan, and Maosong Sun. 2016. Neural relation extraction with selective attention over instances. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (ACL), pages 2124-2133.
Tianyi Liu, Xinsong Zhang, Wanhao Zhou, and Weijia Jia. 2018. Neural relation extraction via inner-sentence noise reduction and transfer learning. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2195-2204.
Tianyu Liu, Kexiang Wang, Baobao Chang, and Zhifang Sui. 2017. A soft-label method for noise-tolerant distantly supervised relation extraction. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1790-1795.
Mike Mintz, Steven Bills, Rion Snow, and Dan Jurafsky. 2009. Distant supervision for relation extraction without labeled data. In Proceedings of the Joint Conference of the 47th Annual Meeting of the Association for Computational Linguistics and the 4th International Joint Conference on Natural Language Processing of the AFNLP (ACL-IJCNLP), pages 1003-1011.
Phuc Xuan Nguyen, Deva Ramanan, and Charless C. Fowlkes. 2018. Active testing: An efficient and robust framework for estimating accuracy. In Proceedings of the 35th International Conference on Machine Learning (ICML), pages 3759-3768.
Pengda Qin, Weiran Xu, and William Yang Wang. 2018a. DSGAN: Generative adversarial training for distant supervision relation extraction. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (ACL), pages 496-505.
Pengda Qin, Weiran Xu, and William Yang Wang. 2018b. Robust distant supervision relation extraction via deep reinforcement learning. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (ACL), pages 2137-2147.
Sebastian Riedel, Limin Yao, and Andrew McCallum. 2010. Modeling relations and their mentions without labeled text. In Joint European Conference on Machine Learning and Knowledge Discovery in Databases (ECML-PKDD), pages 148-163.
Burr Settles. 2009. Active learning literature survey. Technical report, University of Wisconsin-Madison Department of Computer Sciences.
Mihai Surdeanu, Julie Tibshirani, Ramesh Nallapati, and Christopher D Manning. 2012. Multi-instance multi-label learning for relation extraction. In Proceedings of the 2012 joint conference on empirical methods in natural language processing and computational natural language learning (EMNLP-CoNLL), pages 455-465.
Shingo Takamatsu, Issei Sato, and Hiroshi Nakagawa. 2012. Reducing wrong labels in distant supervision for relation extraction. In Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics (ACL), pages 721-729.
Daojian Zeng, Kang Liu, Yubo Chen, and Jun Zhao. 2015. Distant supervision for relation extraction via piecewise convolutional neural networks. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1753-1762.
Xinsong Zhang, Pengshuai Li, Weijia Jia, and Hai Zhao. 2019. Multi-labeled relation extraction with attentive capsule network. In Proceedings of the Thirty-Third AAAI Conference on Artificial Intelligence (AAAI), pages 3243-3249.
# A Appendices
# A.1 Logistic Regression
Here we provide the derivation of Equation 6 in the main paper.
$$
\begin{aligned}
p(z_i^{\prime} \mid y_i^{\prime}, s_i^{\prime}) &= \frac{p(z_i^{\prime}, y_i^{\prime}, s_i^{\prime})}{\sum_{v} p(z_i^{\prime} = v, y_i^{\prime}, s_i^{\prime})} \\
&= \frac{p(z_{jk}, y_{jk}, s_{jk})}{\sum_{v} p(z_{jk} = v, y_{jk}, s_{jk})} \\
&= \frac{p(y_{jk} \mid z_{jk}, s_{jk})\, p(z_{jk} \mid s_{jk})}{\sum_{v} p(y_{jk} \mid z_{jk} = v, s_{jk})\, p(z_{jk} = v \mid s_{jk})}
\end{aligned}
$$
We assume that, given $z_{jk}$, the observed label $y_{jk}$ is conditionally independent of $s_{jk}$, i.e., $p(y_{jk} \mid z_{jk}, s_{jk}) = p(y_{jk} \mid z_{jk})$. The expression then simplifies to:
$$
p(z_i^{\prime} \mid y_i^{\prime}, s_i^{\prime}) = \frac{p(y_{jk} \mid z_{jk})\, p(z_{jk} \mid s_{jk})}{\sum_{v} p(y_{jk} \mid z_{jk} = v)\, p(z_{jk} = v \mid s_{jk})}
$$
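In code, this simplified posterior can be evaluated directly. Below is a minimal sketch for binary $z \in \{0, 1\}$, where `p_y_given_z[y][z]` holds the noise model $p(y \mid z)$ and `p_z_given_s` is the score-conditioned prior $p(z = 1 \mid s)$ (e.g., a fitted logistic regression); both names are hypothetical.

```python
def posterior_z(y, s, p_y_given_z, p_z_given_s):
    """Compute p(z = 1 | y, s) by Bayes' rule for binary z.
    p_y_given_z is a 2x2 table indexed as [y][z]; p_z_given_s is a
    callable returning the prior p(z = 1 | s) for a confidence score s."""
    prior1 = p_z_given_s(s)                  # p(z = 1 | s)
    prior0 = 1.0 - prior1                    # p(z = 0 | s)
    num = p_y_given_z[y][1] * prior1         # p(y | z = 1) p(z = 1 | s)
    den = num + p_y_given_z[y][0] * prior0   # marginalize over z
    return num / den
```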
# A.2 Vetting Strategy
Here we provide the derivation of Equation 8 in the main paper.
$$
\begin{aligned}
E_{p(z_i^{\prime} \mid V)}\left[\Delta_i(z_i^{\prime})\right] &= p_i \cdot \frac{1}{K}\left|1 - p_i\right| + (1 - p_i) \cdot \frac{1}{K}\left|0 - p_i\right| \\
&= \frac{2}{K}\, p_i (1 - p_i)
\end{aligned}
$$
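Since the expected change reduces to $\frac{2}{K} p_i (1 - p_i)$, which peaks at $p_i = 0.5$, the MEMC strategy amounts to vetting the items whose posterior is most uncertain. A minimal sketch (the `posteriors` mapping is a hypothetical name):

```python
def memc_select(posteriors, batch_size):
    """Select the unvetted items with maximum expected change,
    i.e. the largest p * (1 - p); this favors posteriors near 0.5."""
    ranked = sorted(posteriors,
                    key=lambda item: posteriors[item] * (1 - posteriors[item]),
                    reverse=True)
    return ranked[:batch_size]
```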
| Model | Evaluation | P@100 | P@200 | P@300 |
|---|---|---|---|---|
| BGRU+ATT | Held-out Evaluation | 82 | 78.5 | 74.3 |
| BGRU+ATT | Our method | 95.2 | 90.1 | 87.1 |
| BGRU+ATT | Human Evaluation | 98 | 96 | 95 |
Table 4: Precision at top $K$ predictions $(\%)$ of BGRU+ATT under held-out evaluation, our method, and human evaluation on NYT-10.

Figure 3: The PR curve of BGRU+ATT on NYT-19.
# A.3 Experimental Results of BGRU+ATT
We also evaluate the performance of BGRU+ATT with held-out evaluation, human evaluation, and our method. The results are shown in Table 4 and Figure 3. Our method's curve lies at distance 0.15 from the human-evaluation curve, while the corresponding distance for held-out evaluation is 0.55.
# A.4 Results of Different Iterations
We record, for each iteration, the distance between the curve obtained by our method and that of manual evaluation, as shown in Figure 4. We observe that the evaluation results obtained by our method move closer to human evaluation while the number of annotated entity pairs is below 100. Beyond 100, the distance no longer drops rapidly but begins to fluctuate.
# B Case Study
We present realistic cases from NYT-10 to show the effectiveness of our method. In Figure 6, all cases are selected from the top 300 predictions of PCNN+ATT. These instances are all automatically labeled $NA$ (negative) in NYT-10, so under held-out evaluation the relation predictions for them are judged as wrong. However, some of them are in fact false negatives that do express the corresponding relations, which causes considerable bias between manual and held-out evaluation.

Figure 4: The result of different iterations for the active testing algorithm with PCNN+ATT and BGRU+ATT.
In our approach, the relation predictions for false-negative instances are assigned a high probability of being correct, while true-negative instances are accurately identified and assigned a low (near-zero) probability.
# C Re-evaluation Discussion
This section provides detailed descriptions and discussion of the re-evaluation experiments.
# C.1 Models
PCNN (Zeng et al., 2015) is the first neural method used in distant supervision without human-designed features.
PCNN+ATT (Lin et al., 2016) further integrates a selective attention mechanism to alleviate the influence of wrongly labeled instances. The selective attention mechanism generates attention weights over multiple instances, which is expected to reduce the weights of those noisy instances dynamically.
PCNN+ATT+SL (Liu et al., 2017) is a development of PCNN+ATT. To correct wrong labels at the entity-pair level during training, the labels of entity pairs are dynamically changed according to the confidence scores of the predicted labels. Clearly, this method depends heavily on the quality of the label generator and is therefore prone to overfitting.

PCNN+ATT+RL (Qin et al., 2018b) adopts reinforcement learning to overcome the wrong-labeling problem in distant supervision. A deep reinforcement learning agent is designed to choose correctly labeled instances based on the performance change of the relation classifier. After that, PCNN+ATT is applied to the filtered data for relation classification.
PCNN+ATT+DSGAN (Qin et al., 2018a) is an adversarial training framework that learns a sentence-level true-positive generator. The positive samples selected by the generator are labeled as negative to train the discriminator, and the optimal generator is obtained when the discriminator cannot differentiate them. The generator is then used to filter the distant supervision training dataset, and PCNN+ATT performs relation extraction on the filtered data.

BGRU is a recurrent neural network that effectively captures global sequence information. It is a powerful fundamental model widely used in natural language processing tasks.
BGRU+ATT combines BGRU with the selective attention mechanism.
STPRE (Liu et al., 2018) extracts relation features with BGRU. To reduce inner-sentence noise, the authors utilize a Sub-Tree Parse (STP) method to remove irrelevant words. Furthermore, model parameters are initialized with prior knowledge learned from an entity-type prediction task via transfer learning.

Figure 5: PR curve of distantly supervised relation extractors on NYT-10 with the proposed active testing.
# C.2 Discussion
In this section, we additionally provide PR curves to show the performance of the baselines. From both Table 3 and Figure 5, we observe that: 1) According to the PR curves, the relative ranking is quite different from that under held-out evaluation. 2) Selective attention provides limited help in improving overall performance, even though it may have positive effects at high confidence scores. 3) The soft-label method greatly improves accuracy at high confidence scores but significantly reduces overall performance; we deduce that it is severely affected by the unbalanced instance numbers of different relations, which makes the label generator overfit to frequent labels. 4) In terms of the overall performance indicated by the PR curves, BGRU is the most solid relation extractor.

| | Instance | Real Label | Prediction | Probability |
|---|---|---|---|---|
| false negative | He renewed that call four years ago in a document jointly written with Ami Ayalon, a former chief of Israel's Shin Bet security agency and a leader of the Labor party. | /person/nationality | /person/nationality | 1.0 (vetted) |
| false negative | But, if so, you probably would not be familiar with the town of Ramapo in Rockland County. | /location/contain | /location/contain | 0.842 |
| false negative | Mr. Voulgaris lives in Oyster Bay but has summered on Shelter Island since he was a child growing up in Huntington in western Suffolk County. | /location/contain | /location/contain | 0.837 |
| true negative | His visit opened a new level of debate in Israel about the possibility of negotiations with the Syrian president, Bashar al-Assad. | NA | /person/nationality | 0.0 (vetted) |
| true negative | They are in the United States, the United Kingdom and Canada, among other places, but not in the Jewish settlements of the West Bank. | NA | /administrative_division/country | 0.0 |
| true negative | Mr. Spielberg and Stacey Snider, the former Universal Pictures studio chairman who joined DreamWorks last year as chief executive, have sole authority to greenlight films that cost $85 million or less. | NA | /person/company | 0.088 |
Figure 6: A case study of the active testing approach for distantly supervised relation extraction. The entities are labeled in red in the original figure. 1.0 (vetted) and 0.0 (vetted) mean that the entity pair has been vetted in our method.
activetestinganunbiasedevaluationmethodfordistantlysupervisedrelationextraction/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c666bd6c57581fe3a76ec4d54b6a757ccecac90b9e9bb1cd6090ae809b476ced
size 398639
activetestinganunbiasedevaluationmethodfordistantlysupervisedrelationextraction/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6982e4bc021a7914592f07b657fc26d5d77dbed80188f15130d5c84283b4d361
size 304243
actordoublecriticincorporatingmodelbasedcriticfortaskorienteddialoguesystems/ea6019ac-bb6b-406e-aca5-4d658eddd193_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:738d5f21aadc6dccdb898dbdf973d56de9aafa0425a36c1144d74192ddefc16c
size 65737
actordoublecriticincorporatingmodelbasedcriticfortaskorienteddialoguesystems/ea6019ac-bb6b-406e-aca5-4d658eddd193_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c70eded82ad25000f3c9644a454aba2b2f993e6aaa197fafa9c01e9d7962876a
size 82462