| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T16:22:06.560346Z" |
| }, |
| "title": "Correcting the Misuse: A Method for the Chinese Idiom Cloze Test", |
| "authors": [ |
| { |
| "first": "Xinyu", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "State Key Laboratory of Networking and Switching Technology", |
| "institution": "Beijing University of Posts and Telecommunications", |
| "location": {} |
| }, |
| "email": "xinyu.wang@bupt.edu.cn" |
| }, |
| { |
| "first": "Hongsheng", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Beijing University of Posts and Telecommunications", |
| "location": {} |
| }, |
| "email": "zhaohs@bupt.edu.cn" |
| }, |
| { |
| "first": "Tan", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Beijing University of Posts and Telecommunications", |
| "location": {} |
| }, |
| "email": "tyang@bupt.edu.cn" |
| }, |
| { |
| "first": "Hongbo", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "State Key Laboratory of Networking and Switching Technology", |
| "institution": "Beijing University of Posts and Telecommunications", |
| "location": {} |
| }, |
| "email": "hbwang@bupt.edu.cn" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "The cloze test for Chinese idioms is a new challenge in machine reading comprehension: given a sentence with a blank, choosing a candidate Chinese idiom which matches the context. Chinese idiom is a type of Chinese idiomatic expression. The common misuse of Chinese idioms leads to error in corpus and causes error in the learned semantic representation of Chinese idioms. In this paper, we introduce the definition written by Chinese experts to correct the misuse. We propose a model for the Chinese idiom cloze test integrating various information effectively. We propose an attention mechanism called Attribute Attention to balance the weight of different attributes among different descriptions of the Chinese idiom. Besides the given candidates of every blank, we also try to choose the answer from all Chinese idioms that appear in the dataset as the extra loss due to the uniqueness and specificity of Chinese idioms. In experiments, our model outperforms the state-of-the-art model.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "The cloze test for Chinese idioms is a new challenge in machine reading comprehension: given a sentence with a blank, choosing a candidate Chinese idiom which matches the context. Chinese idiom is a type of Chinese idiomatic expression. The common misuse of Chinese idioms leads to error in corpus and causes error in the learned semantic representation of Chinese idioms. In this paper, we introduce the definition written by Chinese experts to correct the misuse. We propose a model for the Chinese idiom cloze test integrating various information effectively. We propose an attention mechanism called Attribute Attention to balance the weight of different attributes among different descriptions of the Chinese idiom. Besides the given candidates of every blank, we also try to choose the answer from all Chinese idioms that appear in the dataset as the extra loss due to the uniqueness and specificity of Chinese idioms. In experiments, our model outperforms the state-of-the-art model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The Chinese idiom comprehension requires the ability to understand Chinese idioms. Chinese idiom, which is called \"\u6210\u8bed\" (chengyu) in Chinese, consists of four characters. Chinese idioms are mostly derived from stories in ancient literature from Chinese history, and often reflect the moral behind the stories. To measure the ability of understanding Chinese idioms, the Chinese idiom cloze test dataset was proposed (Zheng et al., 2019) : given a sentence with a blank, an examinee is required to choose an idiom which best matches the context surrounding the blank. Table 1 shows an example of the Chinese idiom cloze test.", |
| "cite_spans": [ |
| { |
| "start": 415, |
| "end": 435, |
| "text": "(Zheng et al., 2019)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 566, |
| "end": 573, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The misuse of Chinese idioms is prevalent among Chinese native speakers who did not receive a professional Chinese education. Due to the metaphorical meaning of Chinese idioms, even Chinese native speakers who do not major in Chinese would use a Chinese idiom with its literal meaning, which causes misuse. Table 2 shows some common misuses of Chinese idioms. The misuse meaning is often related to the literal meaning.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 307, |
| "end": 314, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The misuse of Chinese idiom appears in various social media and text such as Weibo and Zhihu. The Chinese word embeddings and Chinese language models are pretrained on these corpora that contain the misuse of Chinese idioms and learn the incorrect meaning of Chinese idioms. For example, in Table 3 , we use Google Translate to translate Chinese idioms finding that some results are incorrect, and the incorrect meanings happen to be the common misuses of these Chinese idioms. In this paper, we introduce the definition of Chinese idiom, which is written by the Chinese experts, to correct the misuse. The complete definition describes the accurate interpretation and usage of Chinese idioms. Besides, because the misuse often comes from the literal meaning of the Chinese idiom, we propose an attention mechanism called Attribute Attention that extracts the relationships between the character-level and word-level representations.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 291, |
| "end": 298, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Moreover, using the definition to correct the misuse does not mean that the non-misuse part would be dropped. Take \u4e03\u6708\u6d41\u706b in Table 2 as an example. The common misuse of \u4e03\u6708\u6d41\u706b is not totally incorrect. \u4e03\u6708\u6d41\u706b referring to the weather is correct, but the weather turning hot is incorrect. Therefore, we propose Attribute Attention to make use of other representations of \u4e03\u6708\u6d41\u706b even if they contain incorrect information.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 123, |
| "end": 130, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In addition, Chinese idioms are derived from stories in ancient literature and contain abundant information. Chinese idioms contain more information so they are more likely to be used in a more specific context than common words. For example, \u7f8e means \"beautiful\", \u8f6e means \"wheel\", Sentence with a blank \u4ed6\u4eec\u5e0c\u671b\u80fd \u518d\u8fdb\u4e00\u6b65 They hope that they can and achieve greater success.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "A candidate idiom \u767e\u5c3a\u7aff\u5934 Literal translation: at the top of a hundred-foot pole.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Free translation: make still further progress.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "When one has achieved great success, one should continue to work hard to make greater progress. and \u5942 means \"magnificent\". The Chinese idiom \u7f8e\u8f6e\u7f8e\u5942 means \"a building is beautiful\". \u7f8e\u8f6e \u7f8e\u5942 can be used only when describing a building, whereas those four characters are not related to building. When those four characters are combined, the meaning becomes narrow. It is more difficult to find two similar Chinese idioms than normal words. In this paper, besides choosing the answer from the given candidates, our model tries to choose the answer from the whole vocabulary of candidate Chinese idioms that appear in the dataset and calculate its loss as a part of the final loss. In this way, relationships between much more idioms can be captured every time. It costs very few extra computing resources but provides significant improvement.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u6bd4\u55bb\u5230\u4e86\u6781\u9ad8\u7684\u5883\u5730\uff0c\u4ecd\u987b\u7ee7\u7eed\u52aa\u529b\uff0c\u6c42\u66f4\u5927\u7684\u8fdb\u6b65\u3002", |
| "sec_num": null |
| }, |
| { |
| "text": "In experiments, our model outperforms the stateof-the-art model. Our main contributions are summarized as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u6bd4\u55bb\u5230\u4e86\u6781\u9ad8\u7684\u5883\u5730\uff0c\u4ecd\u987b\u7ee7\u7eed\u52aa\u529b\uff0c\u6c42\u66f4\u5927\u7684\u8fdb\u6b65\u3002", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 We introduce the definition and propose Attribute Attention to balance the importance of different representations of the Chinese idiom.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u6bd4\u55bb\u5230\u4e86\u6781\u9ad8\u7684\u5883\u5730\uff0c\u4ecd\u987b\u7ee7\u7eed\u52aa\u529b\uff0c\u6c42\u66f4\u5927\u7684\u8fdb\u6b65\u3002", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 We add an extra loss obtained by choosing the answer from all Chinese idioms that appear in the dataset, which costs very few extra computing resources but provides significant improvement.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u6bd4\u55bb\u5230\u4e86\u6781\u9ad8\u7684\u5883\u5730\uff0c\u4ecd\u987b\u7ee7\u7eed\u52aa\u529b\uff0c\u6c42\u66f4\u5927\u7684\u8fdb\u6b65\u3002", |
| "sec_num": null |
| }, |
| { |
| "text": "The cloze test is a classic task of reading comprehension and many methods were proposed (Hermann et al., 2015; Chen et al., 2016; Wang et al., 2018; . The Chinese idiom cloze test is more challenging because Chinese idioms convey the metaphorical meaning and are misused sometimes. Most works related to idioms focused on English idioms identification (Gedigian et al., 2006; Katz and Giesbrecht, 2006; Fazly et al., 2009; Shutova et al., 2010; Salton et al., 2016; Do Dinh et al., 2018b; Flor and Beigman Klebanov, 2018; Do Dinh et al., 2018a; Liu and Hwa, 2018) . Some works have tried to use definitions: Spasic et al. (2017) analyzed the sentiment of definitions; Fathima Shirin and Raseek (2018) used the similarity between different definitions. However, these methods introduced definitions but did not try to understand them. Liu et al. (2017) used CharLSTM to encode the meaning of idioms, which has a similar idea to . Only a few works have been done with Chinese idioms such as building Chinese emotion lexicons (Xu et al., 2010) initions and the attention mechanism of Attentive Reader (AR) (Hermann et al., 2015; Chen et al., 2016) .", |
| "cite_spans": [ |
| { |
| "start": 89, |
| "end": 111, |
| "text": "(Hermann et al., 2015;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 112, |
| "end": 130, |
| "text": "Chen et al., 2016;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 131, |
| "end": 149, |
| "text": "Wang et al., 2018;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 353, |
| "end": 376, |
| "text": "(Gedigian et al., 2006;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 377, |
| "end": 403, |
| "text": "Katz and Giesbrecht, 2006;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 404, |
| "end": 423, |
| "text": "Fazly et al., 2009;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 424, |
| "end": 445, |
| "text": "Shutova et al., 2010;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 446, |
| "end": 466, |
| "text": "Salton et al., 2016;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 467, |
| "end": 489, |
| "text": "Do Dinh et al., 2018b;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 490, |
| "end": 522, |
| "text": "Flor and Beigman Klebanov, 2018;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 523, |
| "end": 545, |
| "text": "Do Dinh et al., 2018a;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 546, |
| "end": 564, |
| "text": "Liu and Hwa, 2018)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 609, |
| "end": 629, |
| "text": "Spasic et al. (2017)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 835, |
| "end": 852, |
| "text": "Liu et al. (2017)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 1024, |
| "end": 1041, |
| "text": "(Xu et al., 2010)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 1104, |
| "end": 1126, |
| "text": "(Hermann et al., 2015;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 1127, |
| "end": 1145, |
| "text": "Chen et al., 2016)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Formally, the Chinese idiom cloze test requires the model to choose the correct answer from a number of the candidate idioms given a sentence with a blank. The sentence is defined as a sequence of characters with a blank, which is also called context in the following. The candidate Chinese idiom is defined as a sequence of four characters, which is called idiom in the following. The definition is defined as a sequence of characters interpreting the corresponding idiom. In this paper, the term \"BERT\" refers to the BERT-like models (Devlin et al., 2019; Liu et al., 2019; Lan et al., 2019; , because any one of them and even the new BERT-like model in the future can be used in our model. Figure 1 is an overview of our model. The following sections will introduce every part of our model one by one.", |
| "cite_spans": [ |
| { |
| "start": 536, |
| "end": 557, |
| "text": "(Devlin et al., 2019;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 558, |
| "end": 575, |
| "text": "Liu et al., 2019;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 576, |
| "end": 593, |
| "text": "Lan et al., 2019;", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 693, |
| "end": 701, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The definition is not the next sentence of the context. The context and definition do not belong to the same document. It is inappropriate to set the context as the first sentence and set the definition as the second sentence separated by [SEP] for BERT. In this section, as shown in Figure 2 , we propose a way to integrate the context and definition with BERT, which lets the model \"know\" that the definition is mainly related to the idiom. We input the context, the candidate idiom, and definition together. For example, we input the context \"\u4ed6 \u4eec \u5e0c \u671b \u80fd \u518d \u8fdb \u4e00 \u6b65 (they hope they can and achieve greater success) \", the candidate idiom \"\u767e \u5c3a \u7aff \u5934 (make still further progress) \", and the definition \"\u6bd4\u55bb\u9ad8\u7684\u6210\u5c31 (an outstanding achievement) \" together as \"\u4ed6\u4eec\u5e0c\u671b \u80fd The Multi-Head Attention is applied to the context and definition in different ways. Formally, the Multi-Head Attention for the context is:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 284, |
| "end": 292, |
| "text": "Figure 2", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Integrating Context and Definition", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "v (l) i =MultiHeadAttention(m (l\u22121) , v (l\u22121) 1 , v (l\u22121) 2 , . . . , v (l\u22121) |v| ) (1) where v", |
| "eq_num": "(l)" |
| } |
| ], |
| "section": "Integrating Context and Definition", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "i denotes the i-th character of the context at the l-th layer, and m (l) denotes the [MASK] token at the l-th layer; |v| denotes the number of characters of the context. The context only can \"see\" itself and the [MASK] .", |
| "cite_spans": [ |
| { |
| "start": 85, |
| "end": 91, |
| "text": "[MASK]", |
| "ref_id": null |
| }, |
| { |
| "start": 212, |
| "end": 218, |
| "text": "[MASK]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integrating Context and Definition", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The Multi-Head Attention for the definition is:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integrating Context and Definition", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "d (l) i =MultiHeadAttention(m (l\u22121) , v (l\u22121) [SEP ] , d (l\u22121) 1 , d (l\u22121) 2 , . . . , d (l\u22121) |d| )", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Integrating Context and Definition", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integrating Context and Definition", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "d (l)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integrating Context and Definition", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "i denotes the i-th character of the definition d at the l-th layer, and v (l\u22121)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integrating Context and Definition", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "[SEP ] denotes the first [SEP] token at the l-th layer; |d| denotes the number of characters of the definition. The definition is inaccessible to the context, which prevents BERT from regarding the definition as the next sentence of the context.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integrating Context and Definition", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The Multi-Head Attention for the [MASK] is:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integrating Context and Definition", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "m (l) =MultiHeadAttention(m (l\u22121) , v (l\u22121) 1 , v (l\u22121) 2 , . . . , v (l\u22121) |v| , d (l\u22121) 1 , d (l\u22121) 2 , . . . , d (l\u22121) |d| ) (3)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integrating Context and Definition", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The [MASK] can pay attention to the characters of both the context and definition. On the one hand, ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integrating Context and Definition", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "This section is about how to do Attribute Attention and the preparations. In the beginning, we extract the summaries of the context, idiom, and definition. Then we calculate the weight of Attribute Attention with h m from Section 3.1. After that, Attribute Attention will be done with these summaries and the weight.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attribute Attention", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Summarizing context is to predict what kind of idiom would be the correct answer for the blank based on the contextual information. For example, in Figure 3a , the sentence is \"\u4ed6\u4eec\u5e0c\u671b\u80fd \u518d\u8fdb \u4e00\u6b65 (they hope they can and achieve greater success) \". The input is \"\u4ed6\u4eec\u5e0c\u671b\u80fd[MASK]\u518d \u8fdb\u4e00\u6b65\". The output of [MASK] is defined as h c as shown in Figure 3a .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 148, |
| "end": 157, |
| "text": "Figure 3a", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 325, |
| "end": 334, |
| "text": "Figure 3a", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Summarizing Context", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "We use BERT to extract and summary characterlevel information of Chinese idiom. The output is defined as h o , as shown in Figure 3b .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 123, |
| "end": 132, |
| "text": "Figure 3b", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Summarizing Idiom", |
| "sec_num": "3.2.2" |
| }, |
| { |
| "text": "The context and candidate idioms are from the same corpus and share a similar contextual representation. Besides, the [CLS] is not used when summarizing context. Therefore, we use one BERT to model both the context and idiom and use the [CLS] to summarize idioms. In the example of Figure 3b , the candidate idiom is \"\u767e \u5c3a \u7aff \u5934 (achieve great achievement) \". The input is \"[CLS]\u767e\u5c3a\u7aff\u5934\"", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 282, |
| "end": 291, |
| "text": "Figure 3b", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Summarizing Idiom", |
| "sec_num": "3.2.2" |
| }, |
| { |
| "text": "Introducing the definitions can correct the misuse of idioms. We use [CLS] to summarize the definition. In the example of Figure 3c , the definition is \"\u6bd4 \u55bb\u9ad8\u7684\u6210\u5c31 (an outstanding achievement) \". The input is \"[CLS]\u6bd4\u55bb\u9ad8\u7684\u6210\u5c31\". The output of [CLS] is defined as h d .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 122, |
| "end": 131, |
| "text": "Figure 3c", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Summarizing Definition", |
| "sec_num": "3.2.3" |
| }, |
| { |
| "text": "We use word embeddings to extract word-level information in this section. To utilize more information from various corpora, more than one word embedding can be introduced. Different attributes of different word embeddings will be assigned different weights in Attribute Attention. The word embeddings from different sources of one idiom are defined as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Embedding of Idiom", |
| "sec_num": "3.2.4" |
| }, |
| { |
| "text": "{e i } |e| i=1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Embedding of Idiom", |
| "sec_num": "3.2.4" |
| }, |
| { |
| "text": ", where |e| is the number of word embeddings.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Embedding of Idiom", |
| "sec_num": "3.2.4" |
| }, |
| { |
| "text": "As shown in Figure 1 , this section is about generating the weight with h m and {e i } |e| i=1 . For the standard attention mechanism, the attention weight is a series of scalars, whereas the attention weight is a series of vectors in Attribute Attention.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 20, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Weight Generation", |
| "sec_num": "3.2.5" |
| }, |
| { |
| "text": "5 V V V V V V 0 0 0 9 \u4ed6 \u6b65 Token Embedding Segment Embedding Position Embedding M D D M D D M D D \u2026\u2026 \u2026\u2026 \u2026\u2026 \u2026\u2026 \u2026\u2026 \u2026\u2026 \u2026\u2026 \u2026\u2026 \u2026\u2026 V V M D D \u2026\u2026 \u2026\u2026 \u2026 \u2026 \u2026 \u2026 \u2026 \u2026 \u2026 0 5 MASK 1 11 \u767e 1 22 SEP V V V 0 10 SEP V \u2026 Context", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Weight Generation", |
| "sec_num": "3.2.5" |
| }, |
| { |
| "text": "Idiom+Definition ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Weight Generation", |
| "sec_num": "3.2.5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "a <i> m = W <i> m h m", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Weight Generation", |
| "sec_num": "3.2.5" |
| }, |
| { |
| "text": "where W <i> m \u2208 R m\u00d7b is a learnable parameter; m denotes the hidden size of attention, and b denotes the hidden size of BERT such as 768 or 1024.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Weight Generation", |
| "sec_num": "3.2.5" |
| }, |
| { |
| "text": "h m generates the weight based on the context, which is more accurate but also more likely to overfit. The weight {a <i> m } |e|+2 i=1 may \"remember\" every context-idiom pair in the training set. |e| is the number of word embeddings. In this case, we also introduce word embeddings here. The word embedding cannot provide context information but will have stronger generalization ability because it is hard to overfit the training set unless an idiom only appears several times. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Weight Generation", |
| "sec_num": "3.2.5" |
| }, |
| { |
| "text": "where a <i> \u2208 R m . In this way, we can have accuracy and generalization from the two weights.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Weight Generation", |
| "sec_num": "3.2.5" |
| }, |
| { |
| "text": "We define a <i> j as the j-th element of a <i> . In other words a <i> j is the j-th element of the i-th ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Calculation", |
| "sec_num": "3.2.6" |
| }, |
| { |
| "text": "6 \u5e0c [MASK] \u671b \u80fd \u518d \u8fdb \u4e00 \u6b65 BERT \u4eec \u4ed6 c h (a) Context [CLS] \u767e \u5c3a \u7aff \u5934 BERT o h (b) Idiom [CLS] \u6bd4 \u55bb \u9ad8 \u7684 BERT \u6210 \u5c31 d h (c) Definition", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Calculation", |
| "sec_num": "3.2.6" |
| }, |
| { |
| "text": "After that, before applying the attention:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Calculation", |
| "sec_num": "3.2.6" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "h o \u2190 W ao h o h d \u2190 W ad h d e i \u2190 W ae i e i", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Attention Calculation", |
| "sec_num": "3.2.6" |
| }, |
| { |
| "text": "where W ao \u2208 R m\u00d7b , W ad \u2208 R m\u00d7b , and W ae i \u2208 R m\u00d7d are learnable parameters; m denotes the hidden size of attention, b denotes the hidden size of BERT, d denotes the size of word embedding. As shown in Figure 1 , the attention goes through as:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 206, |
| "end": 214, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Attention Calculation", |
| "sec_num": "3.2.6" |
| }, |
| { |
| "text": "h j = a <|e|+1> j h o j + a <|e|+2> j h d j + |e| i=1 a <i> j e i j (9)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Calculation", |
| "sec_num": "3.2.6" |
| }, |
| { |
| "text": "where h j is the j-th element of the output which is defined as h \u2208 R m ; h o j is the j-th element of h o , h d j is the j-th element of h d , and e i j is the j-th element of e i ;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Calculation", |
| "sec_num": "3.2.6" |
| }, |
| { |
| "text": "h contains an accurate and correct description of an idiom under a certain context by choosing information from the idiom, definition, and word embeddings. The correct and important part of every representation remains, and the incorrect and unimportant part is dropped.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Calculation", |
| "sec_num": "3.2.6" |
| }, |
| { |
| "text": "The final output of Attribute Attention is:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Calculation", |
| "sec_num": "3.2.6" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "u a = h T W ua h c", |
| "eq_num": "(10)" |
| } |
| ], |
| "section": "Attention Calculation", |
| "sec_num": "3.2.6" |
| }, |
| { |
| "text": "where W ua \u2208 R m\u00d7b is a learnable parameter. u a \u2208 R 1 is the score to describe whether a candidate idiom is the correct answer.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Calculation", |
| "sec_num": "3.2.6" |
| }, |
| { |
| "text": "This section will introduce the classification part in Figure 1 . One reason for Attribute Attention summarizing the context and definition is to make use of word embedding. Using h m for classification can provide more details about the relationship between characters of the context and characters of the definition. Formally, the classification for h m is:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 55, |
| "end": 63, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Classification", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "u m = W cm h m + b cm (11)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classification", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "where W cm \u2208 R 1\u00d7b and b cm \u2208 R 1 are learnable parameters. u m \u2208 R 1 is the score describing whether a candidate idiom is the correct answer. u a and u m denote the score of one candidate idiom. We further define the {u ai } n i=1 and", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classification", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "{u mi } n i=1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classification", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "as the scores of all candidate idioms, where n denotes the number of candidate idioms. Then we add them up:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classification", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "u si = u ai + u mi", |
| "eq_num": "(12)" |
| } |
| ], |
| "section": "Classification", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "and pass u si through softmax function:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classification", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p i = e u si n k=1 e u sk", |
| "eq_num": "(13)" |
| } |
| ], |
| "section": "Classification", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "p i is the probability for the i-th candidate idiom to be the correct answer. This is the end of inference, but not training.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classification", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Because Chinese idioms are used in more unique and specific context than common words, we choose the answer from all Chinese idioms that appear in the whole cloze test dataset as an extra loss for training. Formally, we use h c to predict the correct answer from the whole vocabulary of candidate Chinese idioms:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extra Loss", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "u c = W cv h c + b v", |
| "eq_num": "(14)" |
| } |
| ], |
| "section": "Extra Loss", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "where W cv \u2208 R v\u00d7b and b v \u2208 R v are learnable parameters; v denotes the number of all candidate Chinese idioms which is much larger than n.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extra Loss", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "q = softmax(u c ) (15) q \u2208 R v", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extra Loss", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "are probabilities for all candidate idioms being the correct answer. In this way, the model can learn relationships between many more idioms every time. Due to the uniqueness and specificity of Chinese idioms, this causes only limited noise but improves the performance significantly. Without Extra Loss, relationships between only the given candidate idioms are considered every time. When inferring, the max probability of {p i } n i=1 is the final result. For training, the cross entropy loss of {p i } n i=1 is defined as l p , and the cross entropy loss of q is defined as l q . The final loss is:",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extra Loss", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "l = l p + \u03b2l q (16)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extra Loss", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "where \u03b2 is a hyper-parameter to determine the weight of the loss l q . Empirically, we suggest setting the value of \u03b2 as 0.5. l is the final loss for training.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extra Loss", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "In this section, we will introduce the details and hyper-parameters for training our model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Details", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Dataset ChID dataset (Zheng et al., 2019) is used in experiments. Table 1 shows a simple example of the dataset. Given a sentence with a blank and several candidate Chinese idioms, an examinee is required to choose a Chinese idiom which best matches the context surrounding the blank. The corpus of ChID contains news, novels, and essays. News and novels are treated as in-domain data, which contains a training set, a development set Dev, and a test set Test. Essays are reserved for out-of-domain test Out, which can evaluate the generalization ability. In this way, the model is trained on news and novels but evaluated on essays.",
| "cite_spans": [ |
| { |
| "start": 21, |
| "end": 41, |
| "text": "(Zheng et al., 2019)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 66, |
| "end": 73, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Training Details", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Ran and Sim are two test sets which have the same sentences as Test. In Ran, candidate idioms are not similar to the golden answer. In Sim, candidate idioms are similar to the golden answer.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Details", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Pretrained Model Pretrained RoBERTa-base (Liu et al., 2019) for Chinese with 12 layers and word embeddings from (Song et al., 2018; Li et al., 2018; Qiu et al., 2018) are used.",
| "cite_spans": [
| {
| "start": 41,
| "end": 59,
| "text": "(Liu et al., 2019)",
| "ref_id": "BIBREF20"
| },
| {
| "start": 112,
| "end": 131,
| "text": "(Song et al., 2018;",
| "ref_id": "BIBREF28"
| },
| {
| "start": 132,
| "end": 148,
| "text": "Li et al., 2018;",
| "ref_id": "BIBREF32"
| },
| {
| "start": 149,
| "end": 166,
| "text": "Qiu et al., 2018)",
| "ref_id": "BIBREF24"
| }
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Details", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Hyper-parameters n is 7 because there are seven candidate idioms for every blank in ChID dataset (Zheng et al., 2019) . v is 3848 because ChID dataset (Zheng et al., 2019) contains 3848 candidate idioms in total. The hidden size of attention m is 100. \u03b2 is set to 0.5.",
| "cite_spans": [ |
| { |
| "start": 97, |
| "end": 117, |
| "text": "(Zheng et al., 2019)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 151, |
| "end": 171, |
| "text": "(Zheng et al., 2019)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Details", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Optimizer The optimizer is Adam (Kingma and Ba, 2014) for BERT with linear schedule and a warm-up ratio of 0.05. The learning rate for RoBERTa is 2e-5, and for other parameters is 1e-3.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Details", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Parameters number The number of parameters of our model for experiments is 322M. The learnable parameters are initialized by (He et al., 2015) .", |
| "cite_spans": [ |
| { |
| "start": 125, |
| "end": 142, |
| "text": "(He et al., 2015)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Details", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "GPU & Environment The model is running on a GPU of NVIDIA GeForce RTX 2080 Ti. Due to the limited GPU RAM, we use gradient accumulation for training. The operating system is Ubuntu 18.04. We use PyTorch 1.4.0 (Paszke et al., 2019) and Transformers 2.4.1 to implement our model. We also use mixed precision training with NVIDIA Apex 0.1 (Micikevicius et al., 2017) to accelerate our model. It takes an average of 42 hours per epoch, and the model achieves the best result within 10 epochs.", |
| "cite_spans": [ |
| { |
| "start": 209, |
| "end": 230, |
| "text": "(Paszke et al., 2019)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 336, |
| "end": 363, |
| "text": "(Micikevicius et al., 2017)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Details", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Metrics The metric for evaluation is the accuracy, which is implemented by Scikit-learn (Pedregosa et al., 2011) .", |
| "cite_spans": [ |
| { |
| "start": 88, |
| "end": 112, |
| "text": "(Pedregosa et al., 2011)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Details", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The description of other models are as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "AR Attentive Reader (AR) (Hermann et al., 2015) . AR uses an attention mechanism to read the sentence.", |
| "cite_spans": [ |
| { |
| "start": 25, |
| "end": 47, |
| "text": "(Hermann et al., 2015)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "SAR Stanford Attentive Reader (SAR) (Chen et al., 2016) . SAR is an improvement based on AR.",
| "cite_spans": [ |
| { |
| "start": 36, |
| "end": 55, |
| "text": "(Chen et al., 2016)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "CR Chengyu Reader (CR) . CR extracts the summary of definition and adopts a similar attention mechanism of AR. (Chen et al., 2016) 71.7 71.5 80.0 64.9 61.7 CR 74.1 73.5 82.8 68.5 65.2 EAR 74.6 74.5 84.4 67.9 65.5 AR-RoBERTa (Hermann et al., 2015; Liu et al., 2019) 77.1 77.1 89.0 68.9 70.9 SAR-RoBERTa (Chen et al., 2016; Liu et al., 2019) 76.3 76.7 88.5 68.0 69.8 CR-RoBERTa Liu et al., 2019) 78.0 78.3 89.9 70.0 71.7 EAR-RoBERTa Liu et al., 2019) which is the same as our model. Both LSTM and RoBERTa provides contextual information. Table 4 shows the accuracies of all methods. The result of human is given by (Zheng et al., 2019) . Our model outperforms all other models in Dev, Test, Ran, Sim, and Out. Besides, our model has much better generalization ability. For example, comparing with EAR-RoBERTa, our model has a 3.9% improvement on Test but 5.3% on Out.", |
| "cite_spans": [ |
| { |
| "start": 111, |
| "end": 130, |
| "text": "(Chen et al., 2016)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 224, |
| "end": 246, |
| "text": "(Hermann et al., 2015;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 247, |
| "end": 264, |
| "text": "Liu et al., 2019)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 302, |
| "end": 321, |
| "text": "(Chen et al., 2016;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 322, |
| "end": 339, |
| "text": "Liu et al., 2019)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 376, |
| "end": 393, |
| "text": "Liu et al., 2019)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 431, |
| "end": 448, |
| "text": "Liu et al., 2019)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 613, |
| "end": 633, |
| "text": "(Zheng et al., 2019)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 536, |
| "end": 543, |
| "text": "Table 4", |
| "ref_id": "TABREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Comparison", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "This section explores how \u03b2 influences the accuracy of our model on Test. Figure 4 shows the results. When \u03b2 = 0, the Extra Loss is not used, which shows the performance of our model that does not use Extra Loss. The accuracy increases very quickly when \u03b2 < 0.3. The accuracy reaches the highest point when \u03b2 = 0.5. The accuracy starts decreasing slowly when \u03b2 > 1. A larger \u03b2 makes the extra loss l q too important and overshadows the normal loss l p , which makes the model deviate from its purpose. Extra Loss gives a significant improvement and costs very few computing resources.",
| "cite_spans": [],
| "ref_spans": [
| {
| "start": 74,
| "end": 82,
| "text": "Figure 4",
| "ref_id": "FIGREF4"
| }
| ], |
| "eq_spans": [], |
| "section": "Extra Loss Studies", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "In this paper, we propose a model for the Chinese idiom cloze test. We introduce the definition and propose Attribute Attention to balance the importance of different representations of the Chinese idiom. We add Extra Loss calculated by choosing the answer from the whole vocabulary of Chinese idioms to improve the performance further, which costs very few computing resources. In experiments, our model outperforms the state-of-the-art method.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "An agent-based approach to Chinese word segmentation", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [ |
| "K" |
| ], |
| "last": "Samuel", |
| "suffix": "" |
| }, |
| { |
| "first": "Mickey", |
| "middle": [ |
| "W C" |
| ], |
| "last": "Chan", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Chong", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the Sixth SIGHAN Workshop on Chinese Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Samuel W.K. Chan and Mickey W.C. Chong. 2008. An agent-based approach to Chinese word segmenta- tion. In Proceedings of the Sixth SIGHAN Workshop on Chinese Language Processing.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "A thorough examination of the cnn/daily mail reading comprehension task", |
| "authors": [ |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Bolton", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1606.02858" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Danqi Chen, Jason Bolton, and Christopher D Man- ning. 2016. A thorough examination of the cnn/daily mail reading comprehension task. arXiv preprint arXiv:1606.02858.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Killing four birds with two stones: Multitask learning for non-literal language detection", |
| "authors": [ |
| { |
| "first": "Erik-L\u00e2n Do", |
| "middle": [], |
| "last": "Dinh", |
| "suffix": "" |
| }, |
| { |
| "first": "Steffen", |
| "middle": [], |
| "last": "Eger", |
| "suffix": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1558--1569", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Erik-L\u00e2n Do Dinh, Steffen Eger, and Iryna Gurevych. 2018a. Killing four birds with two stones: Multi- task learning for non-literal language detection. In Proceedings of the 27th International Conference on Computational Linguistics, pages 1558-1569, Santa Fe, New Mexico, USA. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "One size fits all? a simple LSTM for nonliteral token and construction-level classification", |
| "authors": [ |
| { |
| "first": "Erik-L\u00e2n Do", |
| "middle": [], |
| "last": "Dinh", |
| "suffix": "" |
| }, |
| { |
| "first": "Steffen", |
| "middle": [], |
| "last": "Eger", |
| "suffix": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Second Joint SIGHUM Workshop on Computational Linguistics for Cultural Heritage, Social Sciences, Humanities and Literature", |
| "volume": "", |
| "issue": "", |
| "pages": "70--80", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Erik-L\u00e2n Do Dinh, Steffen Eger, and Iryna Gurevych. 2018b. One size fits all? a simple LSTM for non- literal token and construction-level classification. In Proceedings of the Second Joint SIGHUM Workshop on Computational Linguistics for Cultural Heritage, Social Sciences, Humanities and Literature, pages 70-80, Santa Fe, New Mexico. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Replacing idioms based on their figurative usage", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Fathima Shirin", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Raseek", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "2018 International Conference on Emerging Trends and Innovations in Engineering and Technological Research (ICETIETR)", |
| "volume": "", |
| "issue": "", |
| "pages": "1--6", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICETIETR.2018.8529042" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. Fathima Shirin and C. Raseek. 2018. Replacing id- ioms based on their figurative usage. In 2018 Inter- national Conference on Emerging Trends and Inno- vations in Engineering and Technological Research (ICETIETR), pages 1-6.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Unsupervised type and token identification of idiomatic expressions", |
| "authors": [ |
| { |
| "first": "Afsaneh", |
| "middle": [], |
| "last": "Fazly", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Cook", |
| "suffix": "" |
| }, |
| { |
| "first": "Suzanne", |
| "middle": [], |
| "last": "Stevenson", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Computational Linguistics", |
| "volume": "35", |
| "issue": "1", |
| "pages": "61--103", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/coli.08-010-R1-07-048" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Afsaneh Fazly, Paul Cook, and Suzanne Stevenson. 2009. Unsupervised type and token identification of idiomatic expressions. Computational Linguistics, 35(1):61-103.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Catching idiomatic expressions in EFL essays", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Flor", |
| "suffix": "" |
| }, |
| { |
| "first": "Beata", |
| "middle": [], |
| "last": "Beigman Klebanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Workshop on Figurative Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "34--44", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-0905" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Flor and Beata Beigman Klebanov. 2018. Catching idiomatic expressions in EFL essays. In Proceedings of the Workshop on Figurative Lan- guage Processing, pages 34-44, New Orleans, Louisiana. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Atnet: Answering cloze-style questions via intraattention and inter-attention", |
| "authors": [ |
| { |
| "first": "Chengzhen", |
| "middle": [], |
| "last": "Fu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuntao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Yan", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Advances in Knowledge Discovery and Data Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "242--252", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chengzhen Fu, Yuntao Li, and Yan Zhang. 2019. Atnet: Answering cloze-style questions via intra- attention and inter-attention. In Advances in Knowl- edge Discovery and Data Mining, pages 242-252, Cham. Springer International Publishing.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Ea reader: Enhance attentive reader for cloze-style question answering via multi-space context fusion", |
| "authors": [ |
| { |
| "first": "Chengzhen", |
| "middle": [], |
| "last": "Fu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yan", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
| "volume": "33", |
| "issue": "", |
| "pages": "6375--6382", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chengzhen Fu and Yan Zhang. 2019. Ea reader: En- hance attentive reader for cloze-style question an- swering via multi-space context fusion. In Proceed- ings of the AAAI Conference on Artificial Intelli- gence, volume 33, pages 6375-6382.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Catching metaphors", |
| "authors": [ |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gedigian", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Bryant", |
| "suffix": "" |
| }, |
| { |
| "first": "Srini", |
| "middle": [], |
| "last": "Narayanan", |
| "suffix": "" |
| }, |
| { |
| "first": "Branimir", |
| "middle": [], |
| "last": "Ciric", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the Third Workshop on Scalable Natural Language Understanding", |
| "volume": "", |
| "issue": "", |
| "pages": "41--48", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matt Gedigian, John Bryant, Srini Narayanan, and Bra- nimir Ciric. 2006. Catching metaphors. In Proceed- ings of the Third Workshop on Scalable Natural Lan- guage Understanding, pages 41-48, New York City, New York. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Delving deep into rectifiers: Surpassing human-level performance on imagenet classification", |
| "authors": [ |
| { |
| "first": "Kaiming", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiangyu", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Shaoqing", |
| "middle": [], |
| "last": "Ren", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "The IEEE International Conference on Computer Vision (ICCV)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2015. Delving deep into rectifiers: Surpassing human-level performance on imagenet classification. In The IEEE International Conference on Computer Vision (ICCV).", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Teaching machines to read and comprehend", |
| "authors": [ |
| { |
| "first": "Karl", |
| "middle": [], |
| "last": "Moritz Hermann", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Kocisky", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Grefenstette", |
| "suffix": "" |
| }, |
| { |
| "first": "Lasse", |
| "middle": [], |
| "last": "Espeholt", |
| "suffix": "" |
| }, |
| { |
| "first": "Will", |
| "middle": [], |
| "last": "Kay", |
| "suffix": "" |
| }, |
| { |
| "first": "Mustafa", |
| "middle": [], |
| "last": "Suleyman", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "28", |
| "issue": "", |
| "pages": "1693--1701", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karl Moritz Hermann, Tomas Kocisky, Edward Grefen- stette, Lasse Espeholt, Will Kay, Mustafa Suleyman, and Phil Blunsom. 2015. Teaching machines to read and comprehend. In C. Cortes, N. D. Lawrence, D. D. Lee, M. Sugiyama, and R. Garnett, editors, Advances in Neural Information Processing Systems 28, pages 1693-1701. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Chengyu cloze test", |
| "authors": [ |
| { |
| "first": "Zhiying", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Boliang", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Lifu", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Heng", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Thirteenth Workshop on Innovative Use of NLP for Building Educational Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "154--158", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-0516" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhiying Jiang, Boliang Zhang, Lifu Huang, and Heng Ji. 2018. Chengyu cloze test. In Proceedings of the Thirteenth Workshop on Innovative Use of NLP for Building Educational Applications, pages 154-158, New Orleans, Louisiana. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Automatic identification of non-compositional multiword expressions using latent semantic analysis", |
| "authors": [ |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Katz", |
| "suffix": "" |
| }, |
| { |
| "first": "Eugenie", |
| "middle": [], |
| "last": "Giesbrecht", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the Workshop on Multiword Expressions: Identifying and Exploiting Underlying Properties", |
| "volume": "", |
| "issue": "", |
| "pages": "12--19", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Graham Katz and Eugenie Giesbrecht. 2006. Au- tomatic identification of non-compositional multi- word expressions using latent semantic analysis. In Proceedings of the Workshop on Multiword Expres- sions: Identifying and Exploiting Underlying Prop- erties, pages 12-19, Sydney, Australia. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1412.6980" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Albert: A lite bert for self-supervised learning of language representations", |
| "authors": [ |
| { |
| "first": "Zhenzhong", |
| "middle": [], |
| "last": "Lan", |
| "suffix": "" |
| }, |
| { |
| "first": "Mingda", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Goodman", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Gimpel", |
| "suffix": "" |
| }, |
| { |
| "first": "Piyush", |
| "middle": [], |
| "last": "Sharma", |
| "suffix": "" |
| }, |
| { |
| "first": "Radu", |
| "middle": [], |
| "last": "Soricut", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1909.11942" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, and Radu Soricut. 2019. Albert: A lite bert for self-supervised learn- ing of language representations. arXiv preprint arXiv:1909.11942.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Analogical reasoning on chinese morphological and semantic relations", |
| "authors": [ |
| { |
| "first": "Shen", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhe", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Renfen", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Wensi", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaoyong", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "138--143", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shen Li, Zhe Zhao, Renfen Hu, Wensi Li, Tao Liu, and Xiaoyong Du. 2018. Analogical reasoning on chi- nese morphological and semantic relations. In Pro- ceedings of the 56th Annual Meeting of the Associa- tion for Computational Linguistics (Volume 2: Short Papers), pages 138-143. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Heuristically informed unsupervised idiom usage recognition", |
| "authors": [ |
| { |
| "first": "Changsheng", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Hwa", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1723--1731", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-1199" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Changsheng Liu and Rebecca Hwa. 2018. Heuristi- cally informed unsupervised idiom usage recogni- tion. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 1723-1731, Brussels, Belgium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Idiom-aware compositional distributed semantics", |
| "authors": [ |
| { |
| "first": "Pengfei", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kaiyu", |
| "middle": [], |
| "last": "Qian", |
| "suffix": "" |
| }, |
| { |
| "first": "Xipeng", |
| "middle": [], |
| "last": "Qiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xuanjing", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1204--1213", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D17-1124" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pengfei Liu, Kaiyu Qian, Xipeng Qiu, and Xuan- jing Huang. 2017. Idiom-aware compositional dis- tributed semantics. In Proceedings of the 2017 Con- ference on Empirical Methods in Natural Language Processing, pages 1204-1213, Copenhagen, Den- mark. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Roberta: A robustly optimized bert pretraining approach", |
| "authors": [ |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingfei", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1907.11692" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining ap- proach. arXiv preprint arXiv:1907.11692.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Mixed precision training", |
| "authors": [ |
| { |
| "first": "Paulius", |
| "middle": [], |
| "last": "Micikevicius", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharan", |
| "middle": [], |
| "last": "Narang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonah", |
| "middle": [], |
| "last": "Alben", |
| "suffix": "" |
| }, |
| { |
| "first": "Gregory", |
| "middle": [], |
| "last": "Diamos", |
| "suffix": "" |
| }, |
| { |
| "first": "Erich", |
| "middle": [], |
| "last": "Elsen", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Garcia", |
| "suffix": "" |
| }, |
| { |
| "first": "Boris", |
| "middle": [], |
| "last": "Ginsburg", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Houston", |
| "suffix": "" |
| }, |
| { |
| "first": "Oleksii", |
| "middle": [], |
| "last": "Kuchaiev", |
| "suffix": "" |
| }, |
| { |
| "first": "Ganesh", |
| "middle": [], |
| "last": "Venkatesh", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1710.03740" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paulius Micikevicius, Sharan Narang, Jonah Alben, Gregory Diamos, Erich Elsen, David Garcia, Boris Ginsburg, Michael Houston, Oleksii Kuchaiev, Ganesh Venkatesh, et al. 2017. Mixed precision training. arXiv preprint arXiv:1710.03740.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Pytorch: An imperative style, high-performance deep learning library", |
| "authors": [ |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Paszke", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Gross", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Massa", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Lerer", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Bradbury", |
| "suffix": "" |
| }, |
| { |
| "first": "Gregory", |
| "middle": [], |
| "last": "Chanan", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Killeen", |
| "suffix": "" |
| }, |
| { |
| "first": "Zeming", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Natalia", |
| "middle": [], |
| "last": "Gimelshein", |
| "suffix": "" |
| }, |
| { |
| "first": "Luca", |
| "middle": [], |
| "last": "Antiga", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "32", |
| "issue": "", |
| "pages": "8024--8035", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. 2019. Pytorch: An imperative style, high-performance deep learning library. In Ad- vances in Neural Information Processing Systems 32, pages 8024-8035. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Scikit-learn: Machine learning in Python", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Pedregosa", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Varoquaux", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Gramfort", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Michel", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Thirion", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Grisel", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Blondel", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Prettenhofer", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Weiss", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Dubourg", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Vanderplas", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Passos", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Cournapeau", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Brucher", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Perrot", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Duchesnay", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "12", |
| "issue": "", |
| "pages": "2825--2830", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duch- esnay. 2011. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Revisiting correlations between intrinsic and extrinsic evaluations of word embeddings", |
| "authors": [ |
| { |
| "first": "Yuanyuan", |
| "middle": [], |
| "last": "Qiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hongzheng", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Shen", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Yingdi", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Renfen", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Lijiao", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Chinese Computational Linguistics and Natural Language Processing Based on Naturally Annotated Big Data", |
| "volume": "", |
| "issue": "", |
| "pages": "209--221", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuanyuan Qiu, Hongzheng Li, Shen Li, Yingdi Jiang, Renfen Hu, and Lijiao Yang. 2018. Revisiting cor- relations between intrinsic and extrinsic evaluations of word embeddings. In Chinese Computational Linguistics and Natural Language Processing Based on Naturally Annotated Big Data, pages 209-221. Springer.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Idiom token classification using sentential distributed semantics", |
| "authors": [ |
| { |
| "first": "Giancarlo", |
| "middle": [], |
| "last": "Salton", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Ross", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Kelleher", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "194--204", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-1019" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Giancarlo Salton, Robert Ross, and John Kelleher. 2016. Idiom token classification using sentential distributed semantics. In Proceedings of the 54th Annual Meeting of the Association for Computa- tional Linguistics (Volume 1: Long Papers), pages 194-204, Berlin, Germany. Association for Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Distilbert, a distilled version of bert: Smaller, faster, cheaper and lighter", |
| "authors": [ |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Sanh", |
| "suffix": "" |
| }, |
| { |
| "first": "Lysandre", |
| "middle": [], |
| "last": "Debut", |
| "suffix": "" |
| }, |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Chaumond", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Wolf", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1910.01108" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. 2019. Distilbert, a distilled version of bert: Smaller, faster, cheaper and lighter. arXiv preprint arXiv:1910.01108.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Metaphor identification using verb and noun clustering", |
| "authors": [ |
| { |
| "first": "Ekaterina", |
| "middle": [], |
| "last": "Shutova", |
| "suffix": "" |
| }, |
| { |
| "first": "Lin", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 23rd International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1002--1010", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ekaterina Shutova, Lin Sun, and Anna Korhonen. 2010. Metaphor identification using verb and noun cluster- ing. In Proceedings of the 23rd International Con- ference on Computational Linguistics (Coling 2010), pages 1002-1010, Beijing, China. Coling 2010 Or- ganizing Committee.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Directional skip-gram: Explicitly distinguishing left and right context for word embeddings", |
| "authors": [ |
| { |
| "first": "Yan", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuming", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| }, |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Haisong", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "2", |
| "issue": "", |
| "pages": "175--180", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N18-2028" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yan Song, Shuming Shi, Jing Li, and Haisong Zhang. 2018. Directional skip-gram: Explicitly distinguish- ing left and right context for word embeddings. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Compu- tational Linguistics: Human Language Technolo- gies, Volume 2 (Short Papers), pages 175-180, New Orleans, Louisiana. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Idiom-based features in sentiment analysis: Cutting the gordian knot", |
| "authors": [ |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Spasic", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Buerki", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "IEEE Transactions on Affective Computing", |
| "volume": "", |
| "issue": "", |
| "pages": "1--1", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/TAFFC.2017.2777842" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "I. Spasic, L. Williams, and A. Buerki. 2017. Id- iom-based features in sentiment analysis: Cutting the gordian knot. IEEE Transactions on Affective Computing, pages 1-1.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Enhancing Chinese word segmentation using unlabeled data", |
| "authors": [ |
| { |
| "first": "Weiwei", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Jia", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "970--979", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Weiwei Sun and Jia Xu. 2011. Enhancing Chinese word segmentation using unlabeled data. In Pro- ceedings of the 2011 Conference on Empirical Meth- ods in Natural Language Processing, pages 970- 979, Edinburgh, Scotland, UK. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Convolutional neural network with word embeddings for Chinese word segmentation", |
| "authors": [ |
| { |
| "first": "Chunqi", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Eighth International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "163--172", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chunqi Wang and Bo Xu. 2017. Convolutional neu- ral network with word embeddings for Chinese word segmentation. In Proceedings of the Eighth Interna- tional Joint Conference on Natural Language Pro- cessing (Volume 1: Long Papers), pages 163-172, Taipei, Taiwan. Asian Federation of Natural Lan- guage Processing.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Multiperspective context aggregation for semi-supervised cloze-style reading comprehension", |
| "authors": [ |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Sujian", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Kewei", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Meng", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruoyu", |
| "middle": [], |
| "last": "Jia", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingming", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "857--867", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Liang Wang, Sujian Li, Wei Zhao, Kewei Shen, Meng Sun, Ruoyu Jia, and Jingming Liu. 2018. Multi- perspective context aggregation for semi-supervised cloze-style reading comprehension. In Proceedings of the 27th International Conference on Computa- tional Linguistics, pages 857-867, Santa Fe, New Mexico, USA. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Huggingface's transformers: State-of-the-art natural language processing", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Wolf", |
| "suffix": "" |
| }, |
| { |
| "first": "Lysandre", |
| "middle": [], |
| "last": "Debut", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Sanh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Chaumond", |
| "suffix": "" |
| }, |
| { |
| "first": "Clement", |
| "middle": [], |
| "last": "Delangue", |
| "suffix": "" |
| }, |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Moi", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierric", |
| "middle": [], |
| "last": "Cistac", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Rault", |
| "suffix": "" |
| }, |
| { |
| "first": "Rémi", |
| "middle": [], |
| "last": "Louf", |
| "suffix": "" |
| }, |
| { |
| "first": "Morgan", |
| "middle": [], |
| "last": "Funtowicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Jamie", |
| "middle": [], |
| "last": "Brew", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "ArXiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, Rémi Louf, Morgan Funtow- icz, and Jamie Brew. 2019. Huggingface's trans- formers: State-of-the-art natural language process- ing. ArXiv, abs/1910.03771.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Build Chinese emotion lexicons using a graph-based algorithm and multiple resources", |
| "authors": [ |
| { |
| "first": "Ge", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xinfan", |
| "middle": [], |
| "last": "Meng", |
| "suffix": "" |
| }, |
| { |
| "first": "Houfeng", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 23rd International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1209--1217", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ge Xu, Xinfan Meng, and Houfeng Wang. 2010. Build Chinese emotion lexicons using a graph-based al- gorithm and multiple resources. In Proceedings of the 23rd International Conference on Computa- tional Linguistics (Coling 2010), pages 1209-1217, Beijing, China. Coling 2010 Organizing Committee.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Subword-augmented embedding for cloze reading comprehension", |
| "authors": [ |
| { |
| "first": "Zhuosheng", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yafang", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1802--1814", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhuosheng Zhang, Yafang Huang, and Hai Zhao. 2018. Subword-augmented embedding for cloze reading comprehension. In Proceedings of the 27th Inter- national Conference on Computational Linguistics, pages 1802-1814, Santa Fe, New Mexico, USA. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "ChID: A large-scale Chinese IDiom dataset for cloze test", |
| "authors": [ |
| { |
| "first": "Chujie", |
| "middle": [], |
| "last": "Zheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Minlie", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Aixin", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "778--787", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1075" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chujie Zheng, Minlie Huang, and Aixin Sun. 2019. ChID: A large-scale Chinese IDiom dataset for cloze test. In Proceedings of the 57th Annual Meet- ing of the Association for Computational Linguis- tics, pages 778-787, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "[MASK] \u518d\u8fdb\u4e00\u6b65 [SEP] \u767e\u5c3a\u7aff\u5934:\u6bd4\u55bb\u9ad8 \u7684\u6210\u5c31 [SEP]\". The context is defined as v. The candidate idiom and the definition are defined as d here.", |
| "uris": null, |
| "num": null, |
| "type_str": "figure" |
| }, |
| "FIGREF1": { |
| "text": "[MASK] \"knows\" what kind of idiom could match the context as the correct answer. On the other hand, [MASK] \"knows\" the candidate idiom definition. [MASK] integrates the information from context v and definition d in the character-level. In this way, the relation between the context and the definition is built through the [MASK]. The output of the [MASK] is defined as h m . Architecture of our model.", |
| "uris": null, |
| "num": null, |
| "type_str": "figure" |
| }, |
| "FIGREF2": { |
| "text": "Integrating the context and definition with BERT, where \"V\" denotes the context, \"D\" denotes the idiom and definition, and \"M\" denotes[MASK]. The Multi-Head Attention is applied to the context, definition, and [MASK] in different ways. The input is \"\u4ed6\u4eec\u5e0c\u671b\u80fd [MASK] \u518d\u8fdb\u4e00\u6b65 [SEP] \u767e\u5c3a\u7aff\u5934:\u6bd4\u55bb\u9ad8\u7684\u6210\u5c31 [SEP]\". h m contains information about the context and idiom. A Chinese idiom may not be misused in all contexts. h m can tell the importance of different attributes of an idiom under a certain context. The attention weight vectors for h m are defined as {a <i> m } |e|+2 i=1 :", |
| "uris": null, |
| "num": null, |
| "type_str": "figure" |
| }, |
| "FIGREF3": { |
| "text": "Summarizing the context, idiom, and definition. vector of {a <i> } |e|+2 i=1. Then the softmax function is applied as:", |
| "uris": null, |
| "num": null, |
| "type_str": "figure" |
| }, |
| "FIGREF4": { |
| "text": "Performance of our model with different \u03b2 on Test.", |
| "uris": null, |
| "num": null, |
| "type_str": "figure" |
| }, |
| "TABREF0": { |
| "text": "An example of the Chinese idiom cloze test that contains a sentence, one of the candidate idioms, and the definition of the idiom.", |
| "content": "<table><tr><td colspan=\"2\">Chinese idiom Literal meaning</td><td>Misuse meaning</td><td>Correct meaning</td></tr><tr><td>\u7ffb\u4e91\u8986\u96e8</td><td>A huge change for</td><td>Magnificent</td><td>Skillful</td></tr><tr><td/><td>clouds and rain</td><td/><td/></tr><tr><td>\u4e03\u6708\u6d41\u706b</td><td>Fire in July</td><td colspan=\"2\">The weather turned hot The weather turned cold</td></tr><tr><td>\u4e09\u4eba\u6210\u864e</td><td>Three persons become</td><td>Cooperation lead to</td><td>Spread rumors</td></tr><tr><td/><td>a tiger</td><td>great strength</td><td/></tr></table>", |
| "type_str": "table", |
| "num": null, |
| "html": null |
| }, |
| "TABREF1": { |
| "text": "", |
| "content": "<table/>", |
| "type_str": "table", |
| "num": null, |
| "html": null |
| }, |
| "TABREF3": { |
| "text": "Some incorrect translations of Chinese idioms from Google Translate.", |
| "content": "<table/>", |
| "type_str": "table", |
| "num": null, |
| "html": null |
| }, |
| "TABREF4": { |
| "text": "R m is more generalized but lacks the context. We add them up to get the final weight {a <i> }", |
| "content": "<table><tr><td/><td/><td/><td/><td colspan=\"5\">The attention weight vectors</td></tr><tr><td colspan=\"9\">for word embeddings are defined as {a <i> e</td><td>} |e|+2 i=1 :</td></tr><tr><td/><td colspan=\"2\">a <i> e</td><td>=</td><td>1 |e|</td><td>|e| j</td><td>W e</td><td><i> j</td><td>e j</td><td>(5)</td></tr><tr><td>where W e</td><td><i> j</td><td colspan=\"7\">\u2208 R m\u00d7d is a learnable parameter; d</td></tr><tr><td colspan=\"9\">denotes the size of word embedding such as 300.</td></tr><tr><td colspan=\"9\">a <i> m overfit, whereas a <i> \u2208 R m gives more accurate weight but may e \u2208 |e|+2 i=1 :</td></tr><tr><td/><td colspan=\"8\">a <i> = a <i> m + a <i> e</td></tr></table>", |
| "type_str": "table", |
| "num": null, |
| "html": null |
| }, |
| "TABREF7": { |
| "text": "Comparison of accuracies of different models on ChID dataset.", |
| "content": "<table/>", |
| "type_str": "table", |
| "num": null, |
| "html": null |
| } |
| } |
| } |
| } |