| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T16:21:58.426333Z" |
| }, |
| "title": "ERNIE-NLI: Analyzing the Impact of Domain-Specific External Knowledge on Enhanced Representations for NLI", |
| "authors": [ |
| { |
| "first": "Lisa", |
| "middle": [], |
| "last": "Bauer", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "lbauer6@cs.unc.edu" |
| }, |
| { |
| "first": "Lingjia", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "mbansal@cs.unc.edu" |
| }, |
| { |
| "first": "Unc", |
| "middle": [ |
| "Chapel" |
| ], |
| "last": "Hill", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Bloomberg", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We examine the effect of domain-specific external knowledge variations on deep large scale language model performance. Recent work in enhancing BERT with external knowledge has been very popular, resulting in models such as ERNIE (Zhang et al., 2019a). Using the ERNIE architecture, we provide a detailed analysis on the types of knowledge that result in a performance increase on the Natural Language Inference (NLI) task, specifically on the Multi-Genre Natural Language Inference Corpus (MNLI). While ERNIE uses general TransE embeddings, we instead train domain-specific knowledge embeddings and insert this knowledge via an information fusion layer in the ERNIE architecture, allowing us to directly control and analyze knowledge input. Using several different knowledge training objectives, sources of knowledge, and knowledge ablations, we find a strong correlation between knowledge and classification labels within the same polarity, illustrating that knowledge polarity is an important feature in predicting entailment. We also perform classification change analysis across different knowledge variations to illustrate the importance of selecting appropriate knowledge input regarding content and polarity, and show representative examples of these changes.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We examine the effect of domain-specific external knowledge variations on deep large scale language model performance. Recent work in enhancing BERT with external knowledge has been very popular, resulting in models such as ERNIE (Zhang et al., 2019a). Using the ERNIE architecture, we provide a detailed analysis on the types of knowledge that result in a performance increase on the Natural Language Inference (NLI) task, specifically on the Multi-Genre Natural Language Inference Corpus (MNLI). While ERNIE uses general TransE embeddings, we instead train domain-specific knowledge embeddings and insert this knowledge via an information fusion layer in the ERNIE architecture, allowing us to directly control and analyze knowledge input. Using several different knowledge training objectives, sources of knowledge, and knowledge ablations, we find a strong correlation between knowledge and classification labels within the same polarity, illustrating that knowledge polarity is an important feature in predicting entailment. We also perform classification change analysis across different knowledge variations to illustrate the importance of selecting appropriate knowledge input regarding content and polarity, and show representative examples of these changes.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Recently, the selection and integration of external knowledge into large-scale language models has shown impressive improvements in several Natural Language Understanding (NLU) tasks (Zhang et al., 2019a) . Understanding the relation between external knowledge and model performance is fundamental to understanding how best to select and integrate knowledge into NLU tasks. We focus specifically on Natural Language Inference (NLI), which requires understanding sentence semantics with respect to both the content and polarity. NLI is motivated by recognizing textual entailment, or understanding whether a hypothesis entails, contradicts, or is neutral with respect to a premise. For example, given the premise: \"Some boys are playing soccer\", the hypothesis \"Young men are playing a sport\" is an entailment whereas the hypothesis \"Old men are playing a sport\" is a contradiction. Language modeling is a very common and important approach when considering the NLI task.", |
| "cite_spans": [ |
| { |
| "start": 183, |
| "end": 204, |
| "text": "(Zhang et al., 2019a)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The NLI state-of-the-art utilizes different language modeling techniques to learn the relations between the hypothesis and the premise. Yoon et al. (2018) used Dynamic Self-Attention (DSA) to learn sentence embeddings, Liu et al. (2019) proposed multi-task deep neural network (MT-DNN) for learning language representations in multiple NLU tasks, and Zhang et al. (2019b) combined semantic role labeling and BERT (Devlin et al., 2019) to explicitly absorb contextual semantics over a BERT framework. However, these approaches limit the source of information available for representing both the premise and hypothesis. Consider the following premise and hypothesis:", |
| "cite_spans": [ |
| { |
| "start": 136, |
| "end": 154, |
| "text": "Yoon et al. (2018)", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 219, |
| "end": 236, |
| "text": "Liu et al. (2019)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 347, |
| "end": 371, |
| "text": "and Zhang et al. (2019b)", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 413, |
| "end": 434, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "People cut their expenses for the Golden years. People decrease their expenses for retirement.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "It is challenging to know that \"Golden years\" entails \"retirement\" if we rely only on the context within the two sentences. To illustrate how common this problem is, we conduct a manual analysis of BERT classification errors on the NLI task (specifically on the MNLI corpus (Williams et al., 2018) , more details in Section 6), and find that at least 50% of misclassifications require external knowledge, specifically requiring domain-specific knowledge, world knowledge, jargon-based paraphrases, or commonsense knowledge to resolve the entailment. In the above example, a model that learns the relation between \"Golden years\" and \"retirement\" from external knowledge can be used to enhance NLI inference.", |
| "cite_spans": [ |
| { |
| "start": 274, |
| "end": 297, |
| "text": "(Williams et al., 2018)", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "On the basis of this idea, Chen et al. (2018) and Zhang et al. (2019a) used external knowledge from WordNet and TransE (Bordes et al., 2013) and applied it to NLI models. In their work, pre-trained representations of external knowledge from knowledge bases (e.g., TransE) were directly applied; they did not tailor knowledge content or structure specifically to the NLI task and did not improve NLI performance (Zhang et al., 2019a) . This finding motivates our investigation on how external knowledge can be efficiently used to improve NLI models. The intention of our work is not to propose a new model that outperforms the state-of-the-art, but instead to focus on building a framework for investigating how different types and representations of external knowledge impact an NLI model's decisions.", |
| "cite_spans": [ |
| { |
| "start": 27, |
| "end": 45, |
| "text": "Chen et al. (2018)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 50, |
| "end": 70, |
| "text": "Zhang et al. (2019a)", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 119, |
| "end": 140, |
| "text": "(Bordes et al., 2013)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 411, |
| "end": 432, |
| "text": "(Zhang et al., 2019a)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Consider our previous examples. We want to represent that the relation between \"young men\" and \"boys\" is positive for entailment, and that the relation between \"old men\" and \"boys\" is negative for entailment. Similarly, we want to represent that the relation between \"Golden years\" and \"retirement\" is positive for entailment. The interplay of external knowledge and entailment gives insight into the power of selecting relevant knowledge with respect to both content and polarity of the knowledge. Here, content indicates the semantic meaning of external knowledge and polarity indicates whether the knowledge relation is positive or negative for entailment. The representation of external knowledge is required to be correct in both aspects for the NLI task. The model learns (1) content via our knowledge extraction phase, by extracting concept edges from knowledge graphs, and (2) polarity via our knowledge training phase, by learning the polarity of the relationships between concepts. We define concepts as words or phrases throughout this paper. In this work, we aim to show what type of external knowledge is useful for certain classes of NLI. We examine how different types of knowledge impact neural language model decisions with respect to content and polarity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To this end, we propose ERNIE-NLI, an NLI model that integrates external knowledge to enhance and probe NLI inference decisions. First, we adapt knowledge content in various sources to our setup: external knowledge relations are mapped to NLI knowledge relations (Section 4.2). In this step, we not only represent external knowledge from different sources in a unified way, but also convert external knowledge content to the NLI task. Second, the polarity is learned (Section 4.3): NLI knowl-edge embeddings are learned to predict whether they are positive or negative for entailment. In this step, we extend BERT with a knowledge embedding layer and a classification layer. Third, the content and polarity are applied to NLI classification (Section 4.4). All three phases listed above are depicted in Fig. 1 . ERNIE-NLI is developed on the basis of ERNIE (Zhang et al., 2019a) , which did not improve performance on the NLI task, although it was infused with TransE embeddings. Results show that our model ERNIE-NLI enhanced with adapted knowledge achieves better performance than ERNIE for specific classes depending on knowledge input.", |
| "cite_spans": [ |
| { |
| "start": 856, |
| "end": 877, |
| "text": "(Zhang et al., 2019a)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 802, |
| "end": 808, |
| "text": "Fig. 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We perform an in-depth analysis to examine how different types of knowledge impact an NLI model's decisions with respect to content and polarity. We conduct a series of experiments to investigate why and how the adapted knowledge enhances NLI predictions. From the experiments, we find that:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 Integrating knowledge improves performance for NLI classes that correspond to integrated knowledge with regards to the polarity (e.g., positive knowledge improves entailment classification, etc.).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 Increased amount of knowledge during training improves performance for NLI labels that correspond to increased knowledge with regards to the polarity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 Presence of knowledge at inference improves performance for NLI labels that correspond to present knowledge with regards to polarity (e.g., a correct entailment prediction with the presence of positive knowledge is observed to occur more often than with the presence of negative knowledge, etc.).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 ERNIE-NLI performance is robust to new knowledge content.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In summary, the proposed NLI model enhanced with adapted external knowledge from various sources achieves better performance for respective classes, allows us to analyze the impact of knowledge type, and is robust when the knowledge at inference time has shifted. We examine this performance with detailed analysis throughout the paper. Overall our contributions are as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We propose a knowledge analysis framework, ERNIE-NLI, that allows us to directly control and analyze adapted knowledge input, to investigate the characteristics of knowledge that result in a performance increase on the NLI task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We present findings that show strong correlations between knowledge polarity and downstream performance, illustrating the knowledge features that are important for increased performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We perform extensive analysis and experimentation to support our findings (e.g., classification change analysis, adding knowledge incrementally, adding unseen knowledge, etc). (Mou et al., 2015) , TreeL-STM (Choi et al., 2018) , etc. Previous work has explored using dynamic self-attention (Yoon et al., 2018) , distance-based self-attention (Im and Cho, 2017) and reinforced self-attention (Shen et al., 2018) to enhance sentence encoders. Ensemble methods that combine multiple models have also shown improvements (Wang et al., 2017; Peters et al., 2018; Kim et al., 2019) . Sun et al. (2019) improved masked language modeling with knowledge masking strategies, via entity-level and phrase-level masking, which showed improvement on NLI. Sun et al. (2020) then expanded this work to continual pre-training, which incrementally learns pretraining tasks through constant multi-task learning. Peters et al. (2019) investigated embedding knowledge bases into large-scale models in a multitask setup, seeing improvements on relationship extraction, entity typing, and word sense disambiguation.", |
| "cite_spans": [ |
| { |
| "start": 178, |
| "end": 196, |
| "text": "(Mou et al., 2015)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 209, |
| "end": 228, |
| "text": "(Choi et al., 2018)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 292, |
| "end": 311, |
| "text": "(Yoon et al., 2018)", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 344, |
| "end": 362, |
| "text": "(Im and Cho, 2017)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 393, |
| "end": 412, |
| "text": "(Shen et al., 2018)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 518, |
| "end": 537, |
| "text": "(Wang et al., 2017;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 538, |
| "end": 558, |
| "text": "Peters et al., 2018;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 559, |
| "end": 576, |
| "text": "Kim et al., 2019)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 579, |
| "end": 596, |
| "text": "Sun et al. (2019)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Using external knowledge to enhance NLI models specifically, Chen et al. (2018) obtained the semantic relations between words from WordNet and calculated the relation embeddings using pretrained TransE embeddings. Additionally, previ-ous work has explored injecting lexical knowledge into pre-trained models for MNLI (Williams et al., 2018) , among other tasks (Lauscher et al., 2020; Levine et al., 2020) . Zhang et al. (2019a) adopted a knowledgeable encoder to inject the knowledge information into language representation. However, in contrast to our work, their external knowledge was not trained specifically for the NLI task.", |
| "cite_spans": [ |
| { |
| "start": 61, |
| "end": 79, |
| "text": "Chen et al. (2018)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 317, |
| "end": 340, |
| "text": "(Williams et al., 2018)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 361, |
| "end": 384, |
| "text": "(Lauscher et al., 2020;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 385, |
| "end": 405, |
| "text": "Levine et al., 2020)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Using knowledge embeddings that represent the relations between entities has been useful in various downstream NLP tasks. Bordes et al. (2013) proposed TransE, a method which modeled relationships by interpreting them as translations operating on the low-dimensional embeddings of the entities. To address the issue of complex relation embeddings, Lin et al. (2015b) proposed CTransR in which the entity pairs are clustered into different groups and where the pairs in the same group share the same relation vector. Xiao et al. (2016) developed TransG, a generative Bayesian non-parametric infinite mixture embedding model, to handle multiple relation semantics of an entity pair. Further, integrated logic rules into a translation based knowledge graph embedding model. Their method automatically mined logic rules from triples in a knowledge graph.", |
| "cite_spans": [ |
| { |
| "start": 122, |
| "end": 142, |
| "text": "Bordes et al. (2013)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 348, |
| "end": 366, |
| "text": "Lin et al. (2015b)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 516, |
| "end": 534, |
| "text": "Xiao et al. (2016)", |
| "ref_id": "BIBREF39" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Knowledge Embeddings", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Previous work has also introduced external knowledge to learn better knowledge embeddings. Lin et al. (2015a) and Luo et al. (2015) utilized relation paths and integrated additional semantic information and enforced the embedding space to be semantically smooth so that entities in the same semantic category were close to each other in the embedding space. Wang et al. (2014) used entity names and Wikipedia anchors to align the embeddings of entities and words in the same space. In our work, we focus on converting knowledge relations from different knowledge sources to relations that are tailored to the NLI task. We then use this knowledge to illustrate the impact that both knowledge content and representation have on model performance.", |
| "cite_spans": [ |
| { |
| "start": 91, |
| "end": 109, |
| "text": "Lin et al. (2015a)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 114, |
| "end": 131, |
| "text": "Luo et al. (2015)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 358, |
| "end": 376, |
| "text": "Wang et al. (2014)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Knowledge Embeddings", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Pre-trained language models face several challenges and previous work has analyzed and illustrated their strengths and weaknesses. Ettinger (2020) constructed a series of tests for language models and applied these to BERT to study strengths and weakness. Kassner This previous work has shown that when applying pre-trained language models to a new task, a new domain, or new data variations, these models do not always perform well and additional knowledge may be needed to guide them. We examine how different types of knowledge impact language model decisions with respect to both content and polarity.", |
| "cite_spans": [ |
| { |
| "start": 256, |
| "end": 263, |
| "text": "Kassner", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language Model Challenges", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "In this section, we introduce the particular NLI corpus and external knowledge sources used throughout this work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NLI corpus and External Knowledge", |
| "sec_num": "3" |
| }, |
| { |
| "text": "MNLI, the Multi-Genre Natural Language Inference Corpus (Williams et al., 2018) , consists of 433k sentence pairs annotated with entailment, contradiction, and neutral labels. The corpus covers various genres of both spoken and written text, and offers a wide range of style, various degrees of formality, and a diverse variety of topics and domains. This dataset is evaluated using standard accuracy.", |
| "cite_spans": [ |
| { |
| "start": 56, |
| "end": 79, |
| "text": "(Williams et al., 2018)", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NLI Corpus", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We use several external knowledge sources to learn the relationships between concepts in our task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "External Knowledge Sources", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "ConceptNet ( ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "External Knowledge Sources", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We introduce our terminology in Section 4.1. Then, we introduce the three steps of ERNIE-NLI: (1) knowledge extraction phase (content): extracting knowledge content from external knowledge sources (Section 4.2), (2) knowledge training phase (polarity): learning knowledge embeddings adapted to the NLI task (Section 4.3), and (3) NLI training phase: training our NLI model with the integration of learned knowledge embeddings (Section 4.4). The three phases are shown in Fig. 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 471, |
| "end": 477, |
| "text": "Fig. 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We use the following terms throughout the paper. For clarity, we will demonstrate each term given the example in Table 1 , Example (A).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 113, |
| "end": 120, |
| "text": "Table 1", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Terminology", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "External knowledge pair refers to a pair of two concepts from external knowledge sources, connected by an external knowledge relation, for example RelatedTo(sugar, cream). Each concept may be either a single word or a phrase. External knowledge relation is the relation of the external knowledge pair. Each external knowledge source has a unique set of external knowledge relations. RelatedTo is an example of such a relation. NLI knowledge pair refers to a pair of two concepts from NLI corpus, connected by an NLI knowledge relation, e.g., pos(sugar, cream). NLI knowledge relation is the relation of the NLI knowledge pair. We define two NLI knowledge relations in Section 4.2: pos() and neg(). NLI pair refers to a pair of sentences, in which one sentence is the premise and the other is the hypothesis, as depicted in Table 1 . NLI label is entailment/neutral/contradiction.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 823, |
| "end": 830, |
| "text": "Table 1", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Terminology", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "To represent external knowledge relations from different sources in a unified way, we define two NLI knowledge relations: pos() and neg(). A rulebased heuristic is developed to map the external knowledge relations to NLI knowledge relations. For example, in Table 1 , we see that RelatedTo is mapped to pos(). Additionally, an external knowledge relation such as Antonym would be mapped to neg(). Each external knowledge relation is mapped to one NLI knowledge relation, where different external knowledge relations may be mapped to the same NLI knowledge relation. The specific mappings are listed in the appendix. NLI knowledge pairs are extracted from each NLI pair. For the i-th NLI pair, with premise P and hypothesis H, we first identify all the concepts (single word or key phrase) in P and H using Python Keyphrase Extraction (PKE) (Boudin, 2016) . We then extract each NLI knowledge pair y(c 1 i , c 2 i ) where c 1 i \u2286 P (a concept in the premise), c 2", |
| "cite_spans": [ |
| { |
| "start": 840, |
| "end": 854, |
| "text": "(Boudin, 2016)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 258, |
| "end": 265, |
| "text": "Table 1", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "NLI Knowledge Extraction", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "i \u2286 H (a concept in the hypothesis) and where there exists an NLI knowledge relation y between c 1 i and c 2 i . Considering Example (A) in Table 1 , we see that c 1 i = 'sugar', c 2 i = 'cream', and y = pos().", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 140, |
| "end": 147, |
| "text": "Table 1", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "NLI Knowledge Extraction", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "There may be multiple NLI knowledge pairs in the i-th NLI pair of premise and hypothesis.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NLI Knowledge Extraction", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "To learn the NLI knowledge embeddings, we add two additional components to BERT (Devlin et al., 2019) . Thus, we learn the embedding of y{c 1 i , c 2 i } in the following way. First, the sequence of knowledge tokens {", |
| "cite_spans": [ |
| { |
| "start": 80, |
| "end": 101, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NLI Knowledge Learning", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "[CLS] c 1 i [SEP] c 2 i [SEP]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NLI Knowledge Learning", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "} is passed as input to BERT. Then, we take the subsequent contextual representations from BERT and pass them through a knowledge embedding layer (a linear layer) which casts our BERT representations into a knowledge embedding.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NLI Knowledge Learning", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "o = BERT(c 1 i , c 2 i ) (1) k i = W k (o) + b k (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NLI Knowledge Learning", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "where o is the contextual representation from BERT, W k and b k are weights and bias of the knowledge embedding layer, and k i is the knowledge embedding. Next, the knowledge embedding k i is fed into the NLI knowledge relation classification layer for knowledge fine-tuning:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NLI Knowledge Learning", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "l c = W c (k i ) + b c (3) y = softmax(l c )", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "NLI Knowledge Learning", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "where W c and b c are weights and bias of the classification layer, and y is the NLI knowledge relation prediction. We use cross-entropy loss during training. In this way, we get the knowledge embedding associated with the NLI knowledge relation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NLI Knowledge Learning", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We learn the embeddings for all the NLI knowledge pairs in the i-th NLI pair in the training set such that we have a set of knowledge K i = {k 1 i , . . . , k m i } where m is the length of the knowledge sequence for the i-th NLI pair. We use these embeddings to enhance NLI training described in the next section. The knowledge embeddings are fixed during NLI training. Note that at inference time, we calculate the knowledge embedding of the relation between any two concepts in the premise and hypothesis via Equations 1 and 2, even if the two concepts are not included in the training set. This enables the model to handle unseen concepts and NLI knowledge relations in the inference data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NLI Knowledge Learning", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We propose ERNIE-NLI, built on the ERNIE architecture (Zhang et al., 2019a) , to integrate the knowledge embeddings learned in Section 4.3 into the NLI model.", |
| "cite_spans": [ |
| { |
| "start": 54, |
| "end": 75, |
| "text": "(Zhang et al., 2019a)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NLI Knowledge Enhanced NLI", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "ERNIE (Zhang et al., 2019a) was developed mainly for integrating knowledge graph information into the entity typing and relation extraction tasks. It has two stacked modules: (a) a textual encoder to capture token embeddings and (b) a knowledge encoder to inject the token-oriented knowledge into the textual encoder output. The textual encoder is a multi-layer bidirectional Transformer encoder, similar to BERT. The knowledge encoder concatenates the token embeddings (output from the textual encoder) and entity embeddings (pre-trained TransE embedding).", |
| "cite_spans": [ |
| { |
| "start": 6, |
| "end": 27, |
| "text": "(Zhang et al., 2019a)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ERNIE", |
| "sec_num": "4.4.1" |
| }, |
| { |
| "text": "ERNIE defines two inputs to the model, a token sequence T = {w 1 , . . . , w n } where n is the length of the token sequence, and a entity sequence that aligns to the given tokens as E = {e 1 , . . . , e m } where m is the length of the entity sequence. ERNIE is then defined as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ERNIE", |
| "sec_num": "4.4.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "u = ERNIE(T, E)", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "ERNIE", |
| "sec_num": "4.4.1" |
| }, |
| { |
| "text": "For example, consider the following sentence:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ERNIE", |
| "sec_num": "4.4.1" |
| }, |
| { |
| "text": "Bob Dylan wrote Blowin' in the Wind.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ERNIE", |
| "sec_num": "4.4.1" |
| }, |
| { |
| "text": "To recognize the relation between Bob Dylan and Blowin' in the Wind, ERNIE concatenates the entity embeddings of Bob Dylan and Blowin' in the Wind with the corresponding token embeddings. For more details, please refer to the original paper (Zhang et al., 2019a) .", |
| "cite_spans": [ |
| { |
| "start": 241, |
| "end": 262, |
| "text": "(Zhang et al., 2019a)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ERNIE", |
| "sec_num": "4.4.1" |
| }, |
| { |
| "text": "Though ERNIE is mainly designed for the entity typing and relation extraction tasks, it also reports performance on the MNLI dataset. ERNIE does not show an improvement over BERT, even though it uses the information from the knowledge graph. We speculate that this is because the knowledge type (named entities) is neither the type of knowledge required for the NLI task nor domain-specific to the NLI task. In contrast to ERNIE, which directly uses TransE embeddings (which are not adapted to the NLI task), we propose ERNIE-NLI which uses knowledge embeddings trained on the NLI dataset and tailored for the NLI task. Similar to ERNIE, two inputs are fed into ERNIE-NLI: a token sequence T = {w 1 , . . . , w n } and a knowledge sequence, aligned to the given tokens, as K = {k 1 , . . . , k m } where m is the length of the knowledge sequence. In contrast to ERNIE, knowledge relations are tailored to the NLI task and knowledge embeddings are trained on the NLI training data. Thus, our model definition becomes:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ERNIE-NLI", |
| "sec_num": "4.4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "u = ERNIE(T, K)", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "ERNIE-NLI", |
| "sec_num": "4.4.2" |
| }, |
| { |
| "text": "where our knowledge embeddings for K are fixed during NLI training, similar to the original setup. However, unlike the original setup, our knowledge embeddings are now adapted to the NLI task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ERNIE-NLI", |
| "sec_num": "4.4.2" |
| }, |
| { |
| "text": "As introduced in Section 3, we examine various external knowledge sources. We describe the setups used in this work, all of which are combinations of these sources. The performance of each setup is reported in Section 6. PC is the basic setup and includes Paraphrase Database (PPDB) and ConceptNet. In this setup, we find that the number of positive NLI knowledge relations is greater than the number of negative NLI knowledge relations. Thus, we design additional setups to balance the ratio of positive and negative relations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "PC&Bal balances the positive and negative NLI knowledge relations to 50%-50% by downsampling positive relations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "PCW adds negative NLI knowledge relations from WordNet to PC. PCW&Bal balances the positive and negative NLI knowledge relations to 50%-50% on PCW by downsampling positive relations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "6 Results and Analysis", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Before designing our experiments, we manually analyzed BERT misclassifiations on MNLI, which inspired the decisions regarding content and polarity of knowledge required for improved reasoning and performance. We achieved 83.90% on the MNLI dev set with BERT. We analyzed 40 misclassifications per MNLI domain, and found that across all domains, at least 50% of misclassifications required external knowledge to be resolved. We also found that the combination of ConceptNet and PPDB covered at least 70% of the required concepts for these misclassifications across all domains. Thus, we decided to investigate the impact of external knowledge on NLI models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BERT Error Analysis", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "We run both ERNIE and ERNIE-NLI on the MNLI corpus using our experimental setups. With respect to ERNIE as the baseline, the accuracy changes of ERNIE-NLI are shown in Table 2 . As introduced in Section 5, PC&Bal has less positive relations than PC. We can see that in Table 2 , PC has better performance on the entailment class than PC&Bal, but has worse performance on neutral and contradiction. Similarly, PCW achieves better performance on entailment than PCW&Bal and worse performance on neutral and contradiction. PCW has more negative NLI knowledge relations than PC since PCW has additional negative relations from WordNet. As shown in Table 2 , PC achieves better performance on the entailment class than PCW and worse performance on the neutral class. Similarly, PC&Bal has better performance on the entailment class than PCW&Bal and worse performance on neutral and contradiction classes.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 168, |
| "end": 175, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 269, |
| "end": 276, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 644, |
| "end": 651, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "ERNIE-NLI Performance", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "These results demonstrate a correlation between knowledge polarity and NLI performance, specifically that adding positive knowledge can train an NLI model that is better at making entailment predictions, and that adding negative knowledge can train an NLI model that is better at making neutral and contradiction predictions. As shown in Table 2 best setup for the contradiction and neutral classes is PCW&Bal. The accuracy of the two setups per label and on all labels are included in Table 3 below. Note that in both setups, ERNIE-NLI not only achieves better performance on the particular NLI class, but also achieves better total performance. While ERNIE-NLI achieves better performance in this knowledge-integration setup, for comparison we would like to point out that the state-of-the-art is achieved by T5-11B (Raffel et al., 2020) , which achieves 92.2% on the MNLI test set.", |
| "cite_spans": [ |
| { |
| "start": 818, |
| "end": 839, |
| "text": "(Raffel et al., 2020)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 338, |
| "end": 345, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 486, |
| "end": 493, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "ERNIE-NLI Performance", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "We further analyze the new errors per label made by ERNIE-NLI compared to ERNIE. Table 4 shows the number of error changes grouped by NLI labels, and demonstrates that all the increased error changes from ERNIE to ERNIE-NLI enhanced with PC (i.e., positive numbers in the row of PC) are false entailment classifications. This observation is consistent with the findings in Table 2 : with the introduction of more positive than negative knowledge, our model becomes biased towards entailment. Similarly, all of the increased errors changes from ERNIE to ERNIE-NLI enhanced with PCW&Bal (i.e., positive numbers in the row of PCW&Bal) are false neutral predictions. More interestingly, in this PCW&Bal setup where the positive and negative knowledge is balanced, the new errors only occur when the gold label is entailment and all other errors decrease. These results indicate that the model is able to utilize knowledge in a way that reflects an understanding of the NLI label. When the knowledge is balanced,", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 81, |
| "end": 88, |
| "text": "Table 4", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 373, |
| "end": 380, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Classification Change Analysis", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "Gold Contra Neutral Entail Prediction N E C E C N", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classification Change Analysis", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "PC -2 22 -26 24 -9 -81 PCW&Bal 0 -16 -20 -20 -5 117 the model better understands the boundary between entailment and contradiction.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classification Change Analysis", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "To better understand knowledge effect on ERNIE-NLI, we conduct a series of experiments to answer the following questions:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classification Change Analysis", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "\u2022 Is more knowledge better?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classification Change Analysis", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "\u2022 How does knowledge polarity affect NLI classification?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classification Change Analysis", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "\u2022 How is performance affected if there is new knowledge at inference time?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classification Change Analysis", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "To investigate performance gains with respect to the addition of NLI knowledge, we report the NLI performance depending on the portion of positive knowledge used during NLI knowledge learning under the PC setup in Table 5 , which shows how the incremental addition of positive knowledge during knowledge embedding training increases the NLI performance for the entailment label. Note that the total accuracy is increased as more positive knowledge is added.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 214, |
| "end": 221, |
| "text": "Table 5", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Knowledge Portion during Training", |
| "sec_num": "6.4" |
| }, |
| { |
| "text": "An NLI contradiction pair may extract positive NLI knowledge relations and an entailment pair may extract negative NLI knowledge relations. We analyze the correlation between the presence of NLI knowledge relations and the prediction results on the dev set. Specifically, we compare the prediction changes from ERNIE to ERNIE-NLI using the PC setup. Table 6 shows these prediction changes. X \u2192 Y represents the NLI pairs where baseline ERNIE predicts X while ERNIE-NLI predicts Y. We also include the number of correct prediction changes (i.e., where Y is gold).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 350, |
| "end": 357, |
| "text": "Table 6", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Knowledge Type during Inference", |
| "sec_num": "6.5" |
| }, |
| { |
| "text": "Since we show results on the PC setup, we focus on the first row and first column in the table. The results in the first row indicate that a correct entailment classification with the presence of positive knowledge is observed to occur more often than with the presence of negative knowledge. The results in the first column indicate that a correct entailment classification with the presence of positive knowledge is observed to occur more often than a correct neutral or contradiction classification with positive knowledge. Thus, we see a strong correlation between the presence of positive knowledge and a correct entailment classification. This is a result of using the PC setup in this analysis, which is tailored for positive relations. Thus, while the correct entailment classification has the strongest correlation, we also see the strong effect of positive relations across all categories.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Knowledge Type during Inference", |
| "sec_num": "6.5" |
| }, |
| { |
| "text": "We would like to note that these findings are not discovered solely by looking at the label accuracies, as other classification shifts in this setting occur. We believe carrying out careful analyses, such as these, enable us to gain a deeper understanding of how knowledge affects the neural model, as we see clear trends in the effect of knowledge presence by polarity via this analysis.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Knowledge Type during Inference", |
| "sec_num": "6.5" |
| }, |
| { |
| "text": "To investigate our model's robustness in a common scenario where there are unseen knowledge relations in the evaluation data, we experiment with using only four external knowledge relations as NLI", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unseen Knowledge during Inference", |
| "sec_num": "6.6" |
| }, |
| { |
| "text": "Contra Neutral Entail Constrained 0.09 0.48 -0.17 Unconstrained -0.31 0.57 0.63 Table 7 : ERNIE-NLI % Accuracy changes for handling unseen relations with respect to ERNIE.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 80, |
| "end": 87, |
| "text": "Table 7", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Mapping", |
| "sec_num": null |
| }, |
| { |
| "text": "knowledge relations during training. The four relations are: RelatedTo, IsA, Independent, Antonym. During inference, we design two scenarios. First, we design a constrained scenario in which new relations during inference time are dropped. For example, if an \"Entails\" relation exists between two concepts according to the knowledge sources, the knowledge is discarded, since it is not included in one of the four relations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Mapping", |
| "sec_num": null |
| }, |
| { |
| "text": "Second, we design an unconstrained scenario that computes the knowledge embedding at inference time. The sequence of the two concepts linked by the \"Entails\" relation, {", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Mapping", |
| "sec_num": null |
| }, |
| { |
| "text": "[CLS] c 1 i [SEP] c 2 i [SEP]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Mapping", |
| "sec_num": null |
| }, |
| { |
| "text": "}, are fed into the BERT layer in Equation (1) and knowledge embedding layer in Equation (2) to get the knowledge embedding.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Mapping", |
| "sec_num": null |
| }, |
| { |
| "text": "We compare the performance of the two scenarios in Table 7 . The unconstrained scenario performs better than the constrained scenario, especially on the entailment label, given that there is more positive knowledge. The result shows ERNIE-NLI's capability of utilizing unseen knowledge relations to improve NLI, indicating the robustness of ERNIE-NLI in providing good predictions even if the inference data has shifted.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 51, |
| "end": 58, |
| "text": "Table 7", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Mapping", |
| "sec_num": null |
| }, |
| { |
| "text": "In this section, we discuss the two examples depicted in Table 1 , to show how external knowledge can assist models on the NLI task.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 57, |
| "end": 64, |
| "text": "Table 1", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Examples", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Integrating external knowledge can equip the model with world knowledge it did not have access to before. In Table 1 , Example (A), the baseline model without external knowledge predicts contradiction, which is incorrect. Our ERNIE-NLI model with external knowledge predicts neutral, which is correct. The external knowledge used in this example is RelatedTo(sugar, cream) and AtLocation(sugar, coffee). The baseline model seems to predict this as contradiction mainly because the premise states never ... in her coffee while the hypothesis states in her coffee. The external knowledge helps correctly align the components: sugar and cream. Note that although the external knowledge indicates that sugar is related to cream, it does not necessarily yield an entailment prediction as the context is still being taking into consideration by the model, which understands that sugar is the main condition for entailment and that cream and sugar are not synonymous in this context.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 109, |
| "end": 116, |
| "text": "Table 1", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introducing World Knowledge", |
| "sec_num": "7.1" |
| }, |
| { |
| "text": "The model looks for similar words or phrases when it judges whether the hypothesis can be entailed from the premise. In the baseline model, the contextual embeddings alone are not strong enough to drive the prediction. In Table 1 , Example (B), the baseline prediction is contradiction, which is wrong. Our ERNIE-NLI model with external knowledge predicts entailment, which is correct. The key knowledge required for this example is Paraphrase(efforts, initiative). By adding this paraphrase knowledge, the enhanced model recognizes the entailment relation of the pair.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 222, |
| "end": 229, |
| "text": "Table 1", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Emphasizing Phrase Similarity", |
| "sec_num": "7.2" |
| }, |
| { |
| "text": "We propose ERNIE-NLI, an NLI model that integrates external knowledge to enhance NLI performance. Our external knowledge representations are tailored to the NLI task and trained to adapt to NLI data requirements. We show that our model enhanced with external knowledge achieves better performance than the previous ERNIE model with non-adapted knowledge depending on the knowledge utilized. We examine these results with several analysis experiments to enable strong conclusions about the correlation between knowledge and NLI classification. Results also demonstrate that the model is able to handle unseen knowledge when the inference data shifts from training data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "8" |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We thank the reviewers for their useful feedback. This work was performed while Lisa interned at Bloomberg, and was later supported by DARPA MCS Grant N66001-19-2-4031, NSF-CAREER Award 1846185, and an NSF PhD Fellowship. The views are those of the authors and not of the funding agency.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| }, |
| { |
| "text": "A.1 Knowledge Mapping Table 8 shows the external knowledge relations that are mapped to positive and negative NLI knowledge relations.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 22, |
| "end": 29, |
| "text": "Table 8", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "A Appendix", |
| "sec_num": null |
| }, |
| { |
| "text": "For our experiments, we did not tune hyperparameters but rather selected our settings to be consistent with Zhang et al. (2019a) . We used batch size 12, learning rate 2e-5, and random seed 42. We did 1 epoch of relation training and 4 epochs of NLI training. We hold these settings constant across all experiments.We built on the framework released by Zhang et al. (2019a) , which included a pytorch implementation of ERNIE, and used all versions and infrastructures included in their implementation.", |
| "cite_spans": [ |
| { |
| "start": 108, |
| "end": 128, |
| "text": "Zhang et al. (2019a)", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 353, |
| "end": 373, |
| "text": "Zhang et al. (2019a)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.2 Hyperparameter Settings", |
| "sec_num": null |
| }, |
| { |
| "text": "Fine ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Course Grained", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Translating embeddings for modeling multirelational data", |
| "authors": [ |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicolas", |
| "middle": [], |
| "last": "Usunier", |
| "suffix": "" |
| }, |
| { |
| "first": "Alberto", |
| "middle": [], |
| "last": "Garcia-Duran", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "Oksana", |
| "middle": [], |
| "last": "Yakhnenko", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "NeurIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Antoine Bordes, Nicolas Usunier, Alberto Garcia- Duran, Jason Weston, and Oksana Yakhnenko. 2013. Translating embeddings for modeling multi- relational data. In NeurIPS.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Recognising textual entailment with logical inference", |
| "authors": [ |
| { |
| "first": "Johan", |
| "middle": [], |
| "last": "Bos", |
| "suffix": "" |
| }, |
| { |
| "first": "Katja", |
| "middle": [], |
| "last": "Markert", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "HLT-EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Johan Bos and Katja Markert. 2005. Recognising textual entailment with logical inference. In HLT- EMNLP.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "pke: an open source pythonbased keyphrase extraction toolkit", |
| "authors": [ |
| { |
| "first": "Florian", |
| "middle": [], |
| "last": "Boudin", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Florian Boudin. 2016. pke: an open source python- based keyphrase extraction toolkit. In COLING.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "A large annotated corpus for learning natural language inference", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Samuel", |
| "suffix": "" |
| }, |
| { |
| "first": "Gabor", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Angeli", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Potts", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Samuel R. Bowman, Gabor Angeli, Christopher Potts, and Christopher D. Manning. 2015. A large anno- tated corpus for learning natural language inference. In EMNLP.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "A fast unified model for parsing and sentence understanding", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Samuel", |
| "suffix": "" |
| }, |
| { |
| "first": "Jon", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| }, |
| { |
| "first": "Abhinav", |
| "middle": [], |
| "last": "Gauthier", |
| "suffix": "" |
| }, |
| { |
| "first": "Raghav", |
| "middle": [], |
| "last": "Rastogi", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Potts", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Samuel R. Bowman, Jon Gauthier, Abhinav Ras- togi, Raghav Gupta, Christopher D. Manning, and Christopher Potts. 2016. A fast unified model for parsing and sentence understanding. In ACL.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Neural natural language inference models enhanced with external knowledge", |
| "authors": [ |
| { |
| "first": "Qian", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhen-Hua", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Diana", |
| "middle": [], |
| "last": "Inkpen", |
| "suffix": "" |
| }, |
| { |
| "first": "Si", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qian Chen, Xiaodan Zhu, Zhen-Hua Ling, Diana Inkpen, and Si Wei. 2018. Neural natural language inference models enhanced with external knowledge. In ACL.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Learning to compose task-specific tree structures", |
| "authors": [ |
| { |
| "first": "Jihun", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "Sang-Goo", |
| "middle": [], |
| "last": "Kang Min Yoo", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jihun Choi, Kang Min Yoo, and Sang-goo Lee. 2018. Learning to compose task-specific tree structures. In AAAI.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "The pascal recognising textual entailment challenge", |
| "authors": [ |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Ido Dagan", |
| "suffix": "" |
| }, |
| { |
| "first": "Bernardo", |
| "middle": [], |
| "last": "Glickman", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Magnini", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Machine Learning Challenges Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "177--190", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ido Dagan, Oren Glickman, and Bernardo Magnini. 2005. The pascal recognising textual entailment challenge. In Machine Learning Challenges Work- shop, pages 177-190. Springer.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understand- ing. NAACL-HLT.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "What bert is not: Lessons from a new suite of psycholinguistic diagnostics for language models", |
| "authors": [ |
| { |
| "first": "Allyson", |
| "middle": [], |
| "last": "Ettinger", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "8", |
| "issue": "", |
| "pages": "34--48", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Allyson Ettinger. 2020. What bert is not: Lessons from a new suite of psycholinguistic diagnostics for lan- guage models. Transactions of the Association for Computational Linguistics, 8:34-48.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Ppdb: The paraphrase database", |
| "authors": [ |
| { |
| "first": "Juri", |
| "middle": [], |
| "last": "Ganitkevitch", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "758--764", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Juri Ganitkevitch, Benjamin Van Durme, and Chris Callison-Burch. 2013. Ppdb: The paraphrase database. In NAACL-HLT, pages 758-764.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Semantically smooth knowledge graph embedding", |
| "authors": [ |
| { |
| "first": "Shu", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Quan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Bin", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Lihong", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "84--94", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/P15-1009" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shu Guo, Quan Wang, Bin Wang, Lihong Wang, and Li Guo. 2015. Semantically smooth knowledge graph embedding. In ACL, pages 84-94, Beijing, China. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Hypothesis transformation and semantic variability rules used in recognizing textual entailment", |
| "authors": [ |
| { |
| "first": "Adrian", |
| "middle": [], |
| "last": "Iftene", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Balahur", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "ACL-PASCAL Workshop on Textual Entailment and Paraphrasing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adrian Iftene and Alexandra Balahur. 2007. Hypoth- esis transformation and semantic variability rules used in recognizing textual entailment. In ACL- PASCAL Workshop on Textual Entailment and Para- phrasing.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Distance-based self-attention network for natural language inference", |
| "authors": [ |
| { |
| "first": "Jinbae", |
| "middle": [], |
| "last": "Im", |
| "suffix": "" |
| }, |
| { |
| "first": "Sungzoon", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1712.02047" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jinbae Im and Sungzoon Cho. 2017. Distance-based self-attention network for natural language infer- ence. arXiv preprint arXiv:1712.02047.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Negated and misprimed probes for pretrained language models: Birds can talk, but cannot fly", |
| "authors": [ |
| { |
| "first": "Nora", |
| "middle": [], |
| "last": "Kassner", |
| "suffix": "" |
| }, |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nora Kassner and Hinrich Sch\u00fctze. 2020. Negated and misprimed probes for pretrained language models: Birds can talk, but cannot fly. ACL.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Semantic sentence matching with denselyconnected recurrent and co-attentive information", |
| "authors": [ |
| { |
| "first": "Seonhoon", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Inho", |
| "middle": [], |
| "last": "Kang", |
| "suffix": "" |
| }, |
| { |
| "first": "Nojun", |
| "middle": [], |
| "last": "Kwak", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Seonhoon Kim, Inho Kang, and Nojun Kwak. 2019. Semantic sentence matching with densely- connected recurrent and co-attentive information. In AAAI.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Specializing unsupervised pretraining models for word-level semantic similarity", |
| "authors": [ |
| { |
| "first": "Anne", |
| "middle": [], |
| "last": "Lauscher", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Edoardo", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Ponti", |
| "suffix": "" |
| }, |
| { |
| "first": "Goran", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Glava\u0161", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 28th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1371--1383", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anne Lauscher, Ivan Vuli\u0107, Edoardo Maria Ponti, Anna Korhonen, and Goran Glava\u0161. 2020. Specializing unsupervised pretraining models for word-level se- mantic similarity. In Proceedings of the 28th Inter- national Conference on Computational Linguistics, pages 1371-1383.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Sensebert: Driving some sense into bert", |
| "authors": [ |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Levine", |
| "suffix": "" |
| }, |
| { |
| "first": "Barak", |
| "middle": [], |
| "last": "Lenz", |
| "suffix": "" |
| }, |
| { |
| "first": "Or", |
| "middle": [], |
| "last": "Dagan", |
| "suffix": "" |
| }, |
| { |
| "first": "Ori", |
| "middle": [], |
| "last": "Ram", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Padnos", |
| "suffix": "" |
| }, |
| { |
| "first": "Or", |
| "middle": [], |
| "last": "Sharir", |
| "suffix": "" |
| }, |
| { |
| "first": "Shai", |
| "middle": [], |
| "last": "Shalev-Shwartz", |
| "suffix": "" |
| }, |
| { |
| "first": "Amnon", |
| "middle": [], |
| "last": "Shashua", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Shoham", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "4656--4667", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoav Levine, Barak Lenz, Or Dagan, Ori Ram, Dan Padnos, Or Sharir, Shai Shalev-Shwartz, Amnon Shashua, and Yoav Shoham. 2020. Sensebert: Driv- ing some sense into bert. In Proceedings of the 58th Annual Meeting of the Association for Compu- tational Linguistics, pages 4656-4667.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Modeling relation paths for representation learning of knowledge bases", |
| "authors": [ |
| { |
| "first": "Yankai", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiyuan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Huanbo", |
| "middle": [], |
| "last": "Luan", |
| "suffix": "" |
| }, |
| { |
| "first": "Maosong", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Siwei", |
| "middle": [], |
| "last": "Rao", |
| "suffix": "" |
| }, |
| { |
| "first": "Song", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "705--714", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D15-1082" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yankai Lin, Zhiyuan Liu, Huanbo Luan, Maosong Sun, Siwei Rao, and Song Liu. 2015a. Modeling relation paths for representation learning of knowledge bases. In EMNLP, pages 705-714, Lisbon, Portugal. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Learning entity and relation embeddings for knowledge graph completion", |
| "authors": [ |
| { |
| "first": "Yankai", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiyuan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Maosong", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xuan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yankai Lin, Zhiyuan Liu, Maosong Sun, Yang Liu, and Xuan Zhu. 2015b. Learning entity and relation em- beddings for knowledge graph completion. In AAAI.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Multi-task deep neural networks for natural language understanding", |
| "authors": [ |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Pengcheng", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Weizhu", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaodong Liu, Pengcheng He, Weizhu Chen, and Jian- feng Gao. 2019. Multi-task deep neural networks for natural language understanding. In ACL.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Context-dependent knowledge graph embedding", |
| "authors": [ |
| { |
| "first": "Yuanfei", |
| "middle": [], |
| "last": "Luo", |
| "suffix": "" |
| }, |
| { |
| "first": "Quan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Bin", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1656--1661", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D15-1191" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuanfei Luo, Quan Wang, Bin Wang, and Li Guo. 2015. Context-dependent knowledge graph embedding. In EMNLP, pages 1656-1661, Lisbon, Portugal. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "An extended model of natural logic", |
| "authors": [ |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Maccartney", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "IWCS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bill MacCartney and Christopher D Manning. 2009. An extended model of natural logic. In IWCS.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Wordnet: a lexical database for english", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "George", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "Communications of the ACM", |
| "volume": "38", |
| "issue": "11", |
| "pages": "39--41", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George A Miller. 1995. Wordnet: a lexical database for english. Communications of the ACM, 38(11):39- 41.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Natural language inference by tree-based convolution and heuristic matching", |
| "authors": [ |
| { |
| "first": "Lili", |
| "middle": [], |
| "last": "Mou", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Men", |
| "suffix": "" |
| }, |
| { |
| "first": "Ge", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Yan", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Lu", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhi", |
| "middle": [], |
| "last": "Jin", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lili Mou, Rui Men, Ge Li, Yan Xu, Lu Zhang, Rui Yan, and Zhi Jin. 2015. Natural language inference by tree-based convolution and heuristic matching. In ACL.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Deep contextualized word representations", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Matthew", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "2227--2237", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew E Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word rep- resentations. In Proceedings of NAACL-HLT, pages 2227-2237.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Knowledge enhanced contextual word representations", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Matthew", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Roy", |
| "middle": [], |
| "last": "Logan", |
| "suffix": "" |
| }, |
| { |
| "first": "Vidur", |
| "middle": [], |
| "last": "Schwartz", |
| "suffix": "" |
| }, |
| { |
| "first": "Sameer", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah A", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "43--54", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew E Peters, Mark Neumann, Robert Logan, Roy Schwartz, Vidur Joshi, Sameer Singh, and Noah A Smith. 2019. Knowledge enhanced contextual word representations. In Proceedings of the 2019 Con- ference on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing (EMNLP- IJCNLP), pages 43-54.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
"title": "Language models as knowledge bases?",
| "authors": [ |
| { |
| "first": "Fabio", |
| "middle": [], |
| "last": "Petroni", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Rockt\u00e4schel", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Anton", |
| "middle": [], |
| "last": "Bakhtin", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuxiang", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [ |
| "H" |
| ], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Riedel", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fabio Petroni, Tim Rockt\u00e4schel, Patrick Lewis, Anton Bakhtin, Yuxiang Wu, Alexander H. Miller, and Se- bastian Riedel. 2019. Language models as knowl- edge bases? In EMNLP.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Exploring the limits of transfer learning with a unified text-to-text transformer", |
| "authors": [ |
| { |
| "first": "Colin", |
| "middle": [], |
| "last": "Raffel", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Roberts", |
| "suffix": "" |
| }, |
| { |
| "first": "Katherine", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharan", |
| "middle": [], |
| "last": "Narang", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Matena", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanqi", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter J", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "21", |
| "issue": "", |
| "pages": "1--67", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. 2020. Exploring the lim- its of transfer learning with a unified text-to-text transformer. Journal of Machine Learning Research, 21:1-67.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Probing natural language inference models through semantic fragments", |
| "authors": [ |
| { |
| "first": "Kyle", |
| "middle": [], |
| "last": "Richardson", |
| "suffix": "" |
| }, |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Lawrence", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Moss", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Sabharwal", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1909.07521" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kyle Richardson, Hai Hu, Lawrence S Moss, and Ashish Sabharwal. 2019. Probing natural lan- guage inference models through semantic fragments. arXiv preprint arXiv:1909.07521.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Reinforced selfattention network: a hybrid of hard and soft attention for sequence modeling", |
| "authors": [ |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Tianyi", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Guodong", |
| "middle": [], |
| "last": "Long", |
| "suffix": "" |
| }, |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Sen", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Chengqi", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "IJCAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tao Shen, Tianyi Zhou, Guodong Long, Jing Jiang, Sen Wang, and Chengqi Zhang. 2018. Reinforced self- attention network: a hybrid of hard and soft attention for sequence modeling. In IJCAI.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Conceptnet 5.5: An open multilingual graph of general knowledge", |
| "authors": [ |
| { |
| "first": "Robyn", |
| "middle": [], |
| "last": "Speer", |
| "suffix": "" |
| }, |
| { |
| "first": "Joshua", |
| "middle": [], |
| "last": "Chin", |
| "suffix": "" |
| }, |
| { |
| "first": "Catherine", |
| "middle": [], |
| "last": "Havasi", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Thirty-First AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robyn Speer, Joshua Chin, and Catherine Havasi. 2017. Conceptnet 5.5: An open multilingual graph of gen- eral knowledge. In Thirty-First AAAI Conference on Artificial Intelligence.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Ernie: Enhanced representation through knowledge integration", |
| "authors": [ |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuohuan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yukun", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Shikun", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Xuyi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Han", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xin", |
| "middle": [], |
| "last": "Tian", |
| "suffix": "" |
| }, |
| { |
| "first": "Danxiang", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hua", |
| "middle": [], |
| "last": "Hao Tian", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, and Hua Wu. 2019. Ernie: Enhanced repre- sentation through knowledge integration. ACL.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Ernie 2.0: A continual pre-training framework for language understanding", |
| "authors": [ |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuohuan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yukun", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Shikun", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Hua", |
| "middle": [], |
| "last": "Hao Tian", |
| "suffix": "" |
| }, |
| { |
| "first": "Haifeng", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Hao Tian, Hua Wu, and Haifeng Wang. 2020. Ernie 2.0: A continual pre-training framework for language un- derstanding. AAAI.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "olmpics-on what language model pre-training captures", |
| "authors": [ |
| { |
| "first": "Alon", |
| "middle": [], |
| "last": "Talmor", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanai", |
| "middle": [], |
| "last": "Elazar", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Berant", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1912.13283" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alon Talmor, Yanai Elazar, Yoav Goldberg, and Jonathan Berant. 2019. olmpics-on what lan- guage model pre-training captures. arXiv preprint arXiv:1912.13283.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Logic rules powered knowledge graph embedding", |
| "authors": [ |
| { |
| "first": "Pengwei", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Dejing", |
| "middle": [], |
| "last": "Dou", |
| "suffix": "" |
| }, |
| { |
| "first": "Fangzhao", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Lianwen", |
| "middle": [], |
| "last": "Nisansa De Silva", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Jin", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1903.03772" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pengwei Wang, Dejing Dou, Fangzhao Wu, Nisansa de Silva, and Lianwen Jin. 2019. Logic rules pow- ered knowledge graph embedding. arXiv preprint arXiv:1903.03772.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Knowledge graph and text jointly embedding", |
| "authors": [ |
| { |
| "first": "Zhen", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianwen", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianlin", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Zheng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhen Wang, Jianwen Zhang, Jianlin Feng, and Zheng Chen. 2014. Knowledge graph and text jointly em- bedding. In EMNLP.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Bilateral multi-perspective matching for natural language sentences", |
| "authors": [ |
| { |
| "first": "Zhiguo", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Wael", |
| "middle": [], |
| "last": "Hamza", |
| "suffix": "" |
| }, |
| { |
| "first": "Radu", |
| "middle": [], |
| "last": "Florian", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "IJCAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhiguo Wang, Wael Hamza, and Radu Florian. 2017. Bilateral multi-perspective matching for natural lan- guage sentences. In IJCAI.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "A broad-coverage challenge corpus for sentence understanding through inference", |
| "authors": [ |
| { |
| "first": "Adina", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikita", |
| "middle": [], |
| "last": "Nangia", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adina Williams, Nikita Nangia, and Samuel Bowman. 2018. A broad-coverage challenge corpus for sen- tence understanding through inference. In NAACL- HLT.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Transg: A generative model for knowledge graph embedding", |
| "authors": [ |
| { |
| "first": "Han", |
| "middle": [], |
| "last": "Xiao", |
| "suffix": "" |
| }, |
| { |
| "first": "Minlie", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaoyan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "2316--2325", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Han Xiao, Minlie Huang, and Xiaoyan Zhu. 2016. Transg: A generative model for knowledge graph embedding. In ACL, pages 2316-2325.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Dynamic self-attention: Computing attention over words dynamically for sentence embedding", |
| "authors": [ |
| { |
| "first": "Deunsol", |
| "middle": [], |
| "last": "Yoon", |
| "suffix": "" |
| }, |
| { |
| "first": "Dongbok", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Sangkeun", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1808.07383" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Deunsol Yoon, Dongbok Lee, and SangKeun Lee. 2018. Dynamic self-attention: Computing atten- tion over words dynamically for sentence embed- ding. arXiv preprint arXiv:1808.07383.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Ernie: Enhanced language representation with informative entities", |
| "authors": [ |
| { |
| "first": "Zhengyan", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xu", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiyuan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xin", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Maosong", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Qun", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhengyan Zhang, Xu Han, Zhiyuan Liu, Xin Jiang, Maosong Sun, and Qun Liu. 2019a. Ernie: En- hanced language representation with informative en- tities. In ACL.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Semantics-aware bert for language understanding", |
| "authors": [ |
| { |
| "first": "Zhuosheng", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuwei", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Zuchao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuailiang", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xi", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiang", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1909.02209" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhuosheng Zhang, Yuwei Wu, Hai Zhao, Zuchao Li, Shuailiang Zhang, Xi Zhou, and Xiang Zhou. 2019b. Semantics-aware bert for language understanding. arXiv preprint arXiv:1909.02209.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF1": { |
| "content": "<table><tr><td>1. Knowledge Extraction KGs CN PPDB WN</td><td>extracted pairs</td><td>2. Knowledge Training BERT</td><td>knowledge embedding</td><td>Knowledge Classifier</td></tr><tr><td>MNLI</td><td/><td/><td/><td>Relation</td></tr><tr><td>concepts</td><td colspan=\"2\">knowledge embedding</td><td/><td>Label Space</td></tr><tr><td/><td/><td>ERNIE</td><td/><td/></tr><tr><td>MNLI data</td><td/><td/><td/><td/></tr><tr><td/><td colspan=\"2\">3. ERNIE-NLI</td><td/><td/></tr><tr><td colspan=\"5\">Figure 1: Components of the setup: (1) Knowledge Extraction Phase: Extracts knowledge content from external</td></tr><tr><td colspan=\"5\">knowledge sources; (2) Knowledge Training Phase: Learns knowledge embeddings adapted to the NLI task; and</td></tr><tr><td colspan=\"5\">(3) ERNIE-NLI: Trains NLI model with the integration of our learned knowledge embeddings.</td></tr><tr><td colspan=\"3\">(2020) added a component that focused on negation</td><td/><td/></tr><tr><td colspan=\"3\">to the LAMA (LAnguage Model Analysis) eval-</td><td/><td/></tr><tr><td>uation framework (</td><td/><td/><td/><td/></tr><tr><td/><td/><td/><td/><td>and Sch\u00fctze</td></tr></table>", |
| "html": null, |
| "num": null, |
| "type_str": "table", |
| "text": "Petroni et al., 2019), showing that BERT failed on most negated statements. Talmor et al. (2019) designed eight reasoning tasks and illustrated that reasoning abilities are strongly context-dependent. Specific to NLI, Richardson et al. (2019) constructed challenging NLI datasets with new semantic fragments and showed that language models, though trained on NLI benchmark datasets, did not perform well on the new fragments." |
| }, |
| "TABREF2": { |
| "content": "<table><tr><td>PPDB, Paraphrase Database (Ganitkevitch et al.,</td></tr><tr><td>2013), contains over 220 million paraphrase pairs</td></tr><tr><td>extracted from bilingual parallel corpora. Each</td></tr><tr><td>paraphrase pair consists of two concepts that have</td></tr><tr><td>a similar meaning.</td></tr><tr><td>WordNet (Miller, 1995) groups nouns, verbs, ad-</td></tr><tr><td>jectives and adverbs into sets of cognitive syn-</td></tr><tr><td>onyms (synsets), each expressing a distinct concept.</td></tr><tr><td>Synsets are linked by different relations including</td></tr><tr><td>synonym, antonymy, hypernymy, hyponymy, etc.</td></tr></table>", |
| "html": null, |
| "num": null, |
| "type_str": "table", |
| "text": "ConceptNet (Speer et al., 2017) is a large semantic graph consisting of general knowledge. Concepts are related through predicates such as IsA(jazz, genre of music) and AtLocation(jazz, new orleans)." |
| }, |
| "TABREF3": { |
| "content": "<table/>", |
| "html": null, |
| "num": null, |
| "type_str": "table", |
| "text": "Premise: I had an additional reason for that belief in the fact that all the cups found contained sugar, which Mademoiselle Cynthia never took in her coffee. Hypothesis: Mademoiselle Cynthia often took milk or cream in her coffee. Label: neutral External Knowledge Pair: RelatedTo(sugar, cream), AtLocation(sugar, coffee) NLI Knowledge Pair: pos(sugar, cream), pos(sugar, coffee) (B) Premise: Lalley also is enthused about other bar efforts on behalf of the poor, most notably the Legal Assistance Center will operate out of the new courthouse. Hypothesis: Lalley is enthusiastic about the bar's initiative to help the poor." |
| }, |
| "TABREF4": { |
| "content": "<table/>", |
| "html": null, |
| "num": null, |
| "type_str": "table", |
| "text": "" |
| }, |
| "TABREF5": { |
| "content": "<table><tr><td>Setup</td><td colspan=\"3\">Contra Neutral Entail</td></tr><tr><td>PC</td><td>-0.62</td><td>0.13</td><td>2.59</td></tr><tr><td>PC&Bal</td><td>0.22</td><td>0.96</td><td>-1.06</td></tr><tr><td>PCW</td><td>-1.00</td><td>0.66</td><td>1.41</td></tr><tr><td>PCW&Bal</td><td>0.59</td><td>1.47</td><td>-0.84</td></tr></table>", |
| "html": null, |
| "num": null, |
| "type_str": "table", |
| "text": ", the best setup for the entailment class is PC and the" |
| }, |
| "TABREF6": { |
| "content": "<table><tr><td>Model</td><td colspan=\"2\">Contr. Neut.</td><td>Ent.</td><td>Total</td></tr><tr><td>ERNIE</td><td>85.91</td><td colspan=\"3\">83.74 80.84 83.42</td></tr><tr><td>ERNIE-NLI E</td><td>85.29</td><td colspan=\"3\">83.87 83.43 84.18</td></tr><tr><td>ERNIE-NLI C&N</td><td>86.50</td><td colspan=\"3\">85.21 80.00 83.74</td></tr></table>", |
| "html": null, |
| "num": null, |
| "type_str": "table", |
| "text": "ERNIE-NLI improvement over ERNIE in % Accuracy per Contradiction/Neutral/Entailment label." |
| }, |
| "TABREF7": { |
| "content": "<table><tr><td>: % Accuracy per label for ERNIE and ERNIE-</td></tr><tr><td>NLI using best setup for each label.</td></tr></table>", |
| "html": null, |
| "num": null, |
| "type_str": "table", |
| "text": "" |
| }, |
| "TABREF8": { |
| "content": "<table><tr><td/><td colspan=\"3\">Contra Neutral Entail Total</td></tr><tr><td>0%</td><td>86.35</td><td>83.93</td><td>80.12 83.37</td></tr><tr><td>25%</td><td>86.25</td><td>83.80</td><td>80.52 83.44</td></tr><tr><td>50%</td><td>86.50</td><td>85.21</td><td>80.00 83.74</td></tr><tr><td>78%</td><td>84.91</td><td>84.40</td><td>82.25 83.80</td></tr><tr><td>100%</td><td>85.29</td><td>83.87</td><td>83.43 84.18</td></tr></table>", |
| "html": null, |
| "num": null, |
| "type_str": "table", |
| "text": "ERNIE-NLI error changes with respect to ERNIE. A positive value indicates that ERNIE-NLI makes more errors than ERNIE on that label and vice versa." |
| }, |
| "TABREF9": { |
| "content": "<table><tr><td>: ERNIE-NLI performance with respect to the</td></tr><tr><td>portion of positive knowledge used during knowledge</td></tr><tr><td>training.</td></tr></table>", |
| "html": null, |
| "num": null, |
| "type_str": "table", |
| "text": "" |
| }, |
| "TABREF11": { |
| "content": "<table/>", |
| "html": null, |
| "num": null, |
| "type_str": "table", |
| "text": "ERNIE-NLI classification changes with respect to ERNIE depending on presence of knowledge at inference time. Numbers without parenthesis are the total changes and numbers in the parenthesis are the correct changes." |
| } |
| } |
| } |
| } |