| { |
| "paper_id": "Q17-1028", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:12:42.819024Z" |
| }, |
| "title": "Learning Distributed Representations of Texts and Entities from Knowledge Base", |
| "authors": [ |
| { |
| "first": "Ikuya", |
| "middle": [], |
| "last": "Yamada", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Hiroyuki", |
| "middle": [], |
| "last": "Shindo", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Nara Institute of Science and Technology", |
| "location": { |
| "country": "Japan" |
| } |
| }, |
| "email": "shindo@is.naist.jp" |
| }, |
| { |
| "first": "Hideaki", |
| "middle": [], |
| "last": "Takeda", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Institute of Informatics", |
| "location": { |
| "country": "Japan" |
| } |
| }, |
| "email": "takeda@nii.ac.jp" |
| }, |
| { |
| "first": "Yoshiyasu", |
| "middle": [], |
| "last": "Takefuji", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Keio University", |
| "location": { |
| "country": "Japan" |
| } |
| }, |
| "email": "takefuji@sfc.keio.ac.jp" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We describe a neural network model that jointly learns distributed representations of texts and knowledge base (KB) entities. Given a text in the KB, we train our proposed model to predict entities that are relevant to the text. Our model is designed to be generic with the ability to address various NLP tasks with ease. We train the model using a large corpus of texts and their entity annotations extracted from Wikipedia. We evaluated the model on three important NLP tasks (i.e., sentence textual similarity, entity linking, and factoid question answering) involving both unsupervised and supervised settings. As a result, we achieved state-of-the-art results on all three of these tasks. Our code and trained models are publicly available for further academic research. 1", |
| "pdf_parse": { |
| "paper_id": "Q17-1028", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We describe a neural network model that jointly learns distributed representations of texts and knowledge base (KB) entities. Given a text in the KB, we train our proposed model to predict entities that are relevant to the text. Our model is designed to be generic with the ability to address various NLP tasks with ease. We train the model using a large corpus of texts and their entity annotations extracted from Wikipedia. We evaluated the model on three important NLP tasks (i.e., sentence textual similarity, entity linking, and factoid question answering) involving both unsupervised and supervised settings. As a result, we achieved state-of-the-art results on all three of these tasks. Our code and trained models are publicly available for further academic research. 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Methods capable of learning distributed representations of arbitrary-length texts (i.e., fixed-length continuous vectors that encode the semantics of texts), such as sentences and paragraphs, have recently attracted considerable attention (Le and Mikolov, 2014; Kiros et al., 2015; Li et al., 2015; Wieting et al., 2016; Hill et al., 2016b; Kenter et al., 2016) . These methods aim to learn generic representations that are useful across domains similar to word embedding methods such as Word2vec (Mikolov et al., 2013b) and GloVe (Pennington et al., 2014) .", |
| "cite_spans": [ |
| { |
| "start": 247, |
| "end": 261, |
| "text": "Mikolov, 2014;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 262, |
| "end": 281, |
| "text": "Kiros et al., 2015;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 282, |
| "end": 298, |
| "text": "Li et al., 2015;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 299, |
| "end": 320, |
| "text": "Wieting et al., 2016;", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 321, |
| "end": 340, |
| "text": "Hill et al., 2016b;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 341, |
| "end": 361, |
| "text": "Kenter et al., 2016)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 497, |
| "end": 520, |
| "text": "(Mikolov et al., 2013b)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 531, |
| "end": 556, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF39" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Another interesting approach is learning distributed representations of entities in a knowledge base (KB) such as Wikipedia and Freebase. These methods encode information of entities in the KB into a continuous vector space. They are shown to be effective for various KB-related tasks such as entity search (Hu et al., 2015) , entity linking (Hu et al., 2015; Yamada et al., 2016) , and link prediction (Bordes et al., 2013; Wang et al., 2014; Lin et al., 2015) .", |
| "cite_spans": [ |
| { |
| "start": 307, |
| "end": 324, |
| "text": "(Hu et al., 2015)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 342, |
| "end": 359, |
| "text": "(Hu et al., 2015;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 360, |
| "end": 380, |
| "text": "Yamada et al., 2016)", |
| "ref_id": "BIBREF52" |
| }, |
| { |
| "start": 403, |
| "end": 424, |
| "text": "(Bordes et al., 2013;", |
| "ref_id": null |
| }, |
| { |
| "start": 425, |
| "end": 443, |
| "text": "Wang et al., 2014;", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 444, |
| "end": 461, |
| "text": "Lin et al., 2015)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we describe a novel method to bridge these two different approaches. In particular, we propose Neural Text-Entity Encoder (NTEE), a neural network model to jointly learn distributed representations of texts (i.e., sentences and paragraphs) and KB entities. For every text in the KB, our model aims to predict its relevant entities, and places the text and the relevant entities close to each other in a continuous vector space. We use humanedited entity annotations obtained from Wikipedia (see Table 1 ) as supervised data of relevant entities to the texts containing these annotations. 2 Note that, KB entities have been conventionally used to model semantics of texts. A representative example is Explicit Semantic Analysis (ESA) (Gabrilovich and Markovitch, 2007) , which represents the semantics of a text using a sparse vector space, where each dimension corresponds to the relevance score of the text to each entity. Essentially, ESA shows that text can be accurately represented using a small set of its relevant entities. Based on this fact, we hypothesize that we can use the annotations of relevant entities as the supervised data of learning text representations. Furthermore, we also consider that placing texts and entities into the same vector space enables us to easily compute the similarity between texts and entities, which can be beneficial for various KB-related tasks.", |
| "cite_spans": [ |
| { |
| "start": 603, |
| "end": 604, |
| "text": "2", |
| "ref_id": null |
| }, |
| { |
| "start": 748, |
| "end": 782, |
| "text": "(Gabrilovich and Markovitch, 2007)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 510, |
| "end": 517, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In order to test this hypothesis, we conduct three experiments involving both the unsupervised and the supervised tasks. First, we use standard semantic textual similarity datasets to evaluate the quality of the learned text representations of our method in an unsupervised fashion. As a result, our method clearly outperformed the state-of-the-art methods.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Furthermore, to test the effectiveness of our method to perform KB-related tasks, we address the following two important problems in the supervised setting: entity linking (EL) and factoid question answering (QA). In both tasks, we adopt a simple multi-layer perceptron (MLP) classifier with the learned representations as features. We tested our method using two standard datasets (i.e., CoNLL 2003 and TAC 2010) for the EL task and a popular factoid QA dataset based on the quiz bowl quiz game for the factoid QA task. As a result, our method outperformed recent state-of-the-art methods on both the EL and the factoid QA tasks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Additionally, there have also been proposed methods that map words and entities into the same continuous vector space (Wang et al., 2014; Yamada et al., 2016; Fang et al., 2016) . Our work differs from these works because we aim to map texts (i.e., sentences and paragraphs) and entities into the same vector space.", |
| "cite_spans": [ |
| { |
| "start": 118, |
| "end": 137, |
| "text": "(Wang et al., 2014;", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 138, |
| "end": 158, |
| "text": "Yamada et al., 2016;", |
| "ref_id": "BIBREF52" |
| }, |
| { |
| "start": 159, |
| "end": 177, |
| "text": "Fang et al., 2016)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our contributions are summarized as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We propose a neural network model that jointly learns vector representations of texts and KB entities. We train the model using a large amount of entity annotations extracted directly from Wikipedia.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We demonstrate that our proposed representations are surprisingly effective for various NLP tasks. In particular, we apply the proposed model to three different NLP tasks, namely semantic textual similarity, entity linking, and factoid question answering, and achieve stateof-the-art results on all three tasks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The Lord of the Rings is an epic high-fantasy novel written by English author J. R. R. Tolkien.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Entity Annotations: The Lord of the Rings, Epic (genre), High fantasy, J. R. R. Tolkien Table 1 : An example of a sentence with entity annotations.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 88, |
| "end": 95, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We release our code and trained models to the community at https://github.com/ studio-ousia/ntee to facilitate further academic research.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this section, we propose our approach of learning distributed representations of texts and entities in KB.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Approach", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Given a text t (a sequence of words w 1 , ..., w N ), we train our model to predict entities e 1 , ..., e n that appear in t. Formally, the probability that represents the likelihood of an entity e appearing in t is defined as the following softmax function:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (e|t) = exp(v e v t ) e \u2208E KB exp(v e v t ) ,", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "where E KB is a set of all entities in KB, and v e \u2208 R d and v t \u2208 R d are the vector representations of the entity e and the text t, respectively. We compute v t using the element-wise sum of word vectors in t with L 2 normalization and a fully connected layer. Let us denote v s as a vector of the sum of word vectors", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "(v s = N i=1 v w i ), v t is computed as follows: v t = W v s v s + b,", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "where W \u2208 R d\u00d7d is a weight matrix, and b \u2208 R d is a bias vector. Here, we initialize v w and v e using the pre-trained representations described in the next section.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The loss function of our model is defined as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L = \u2212 (t,Et)\u2208\u0393 e\u2208Et log P (e|t),", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "where \u0393 denotes a set of pairs each of which consists of a text t and its entity annotations E t in KB. One problem in training our model is that the denominator in Eq. (1) is computationally very expensive because it involves summation over all entities in KB. We address this problem by replacing E KB in Eq. (1) with E * , which is the union of the positive entity e and the randomly chosen k negative entities that do not appear in t. This method can be viewed as negative sampling (Mikolov et al., 2013b ) with a uniform negative distribution.", |
| "cite_spans": [ |
| { |
| "start": 486, |
| "end": 508, |
| "text": "(Mikolov et al., 2013b", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "In addition, because the length of a text t is arbitrary in our model, we test the following two settings: t as a paragraph, and t as a sentence 3 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The parameters to be learned by our model are the vector representations of words and entities in our vocabulary V , the weight matrix W , and the bias vector b. Consequently, the total number of parameters in our model is", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameters", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "|V | \u00d7 d + d 2 + d.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameters", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "We initialize the representations of words and entities using pre-trained representations to reduce the training time. We use the skip-gram model of Word2vec (Mikolov et al., 2013a; Mikolov et al., 2013b) with negative sampling trained with Wikipedia articles. In order to create a corpus for the skip-gram model from Wikipedia, we simply replace the name of each entity annotation in Wikipedia articles with the unique identifier of the entity the annotation refers to. This simple method enables us to easily train the distributed representations of words and entities simultaneously. We used a Wikipedia dump generated in July 2016 4 . For the hyper-parameters of the skip-gram model, we used standard parameters such as the context window size being 10, and the size of negative samples being 5. We used the Python Word2vec implementation in Gensim 5 . Additionally, the entity representations were normalized to unit length before they were used as the pre-trained representations.", |
| "cite_spans": [ |
| { |
| "start": 158, |
| "end": 181, |
| "text": "(Mikolov et al., 2013a;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 182, |
| "end": 204, |
| "text": "Mikolov et al., 2013b)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameters", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "We trained our model by using the English DBpedia abstract corpus (Br\u00fcmmer et al., 2016) , an open corpus of Wikipedia texts with entity annotations manually created by Wikipedia contributors. 6 It was extracted from the first introductory sections of 4.4 million Wikipedia articles. We train our model by iterating over the texts and their entity annotations in the corpus.", |
| "cite_spans": [ |
| { |
| "start": 66, |
| "end": 88, |
| "text": "(Br\u00fcmmer et al., 2016)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 193, |
| "end": 194, |
| "text": "6", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "We used words that appear five times or more and entities that appear three times or more in the corpus, and simply ignored the other words and entities. As a result, our vocabulary V consisted of 705,168 words and 957,207 entities. Further, the number of valid words and entity annotations were approximately 382 million and 28 million, respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Additionally, we also introduce one heuristic method to generate entity annotations. For each text, we add a pseudo-annotation that points to the entity of which the KB page is the source of the text. Because every KB page describes its corresponding entity, it typically contains many mentions referring to the entity. However, because hyper-linking to the web page itself does not make sense, these kinds of mentions cannot be observed as annotations in Wikipedia. Therefore, we use the aforementioned heuristic method to address this problem.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Our model has several hyper-parameters. Following Kenter et al. (2016) , the number of dimensions we used was d = 300. The mini-batch size was fixed at 100, the size of negative samples k was set to 30, and the training consisted of one epoch.", |
| "cite_spans": [ |
| { |
| "start": 50, |
| "end": 70, |
| "text": "Kenter et al. (2016)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Other Details", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "The model was implemented using Python and Theano (Theano Development Team, 2016). The training took approximately six days using a NVIDIA K80 GPU. We trained the model using stochastic gradient descent (SGD) and its learning rate was controlled by RMSprop (Tieleman and Hinton, 2012) .", |
| "cite_spans": [ |
| { |
| "start": 257, |
| "end": 284, |
| "text": "(Tieleman and Hinton, 2012)", |
| "ref_id": "BIBREF46" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Other Details", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "In order to evaluate our model presented in the previous section, we conduct experiments on three important NLP tasks using the representations learned by our model. First, we conduct an experiment on a semantic textual similarity task in order to evaluate the quality of the learned text representations. Next, we conduct experiments on two important NLP problems (i.e., EL and factoid QA) in order to test the effectiveness of our proposed representations as features for downstream NLP tasks. Finally, we further qualitatively analyze the learned representations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Note that we separately describe how we address each task using our representations in the subsection of each experiment.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Semantic textual similarity aims to test how well a model reflects human judgments of the semantic similarity between two sentence pairs. The task has been used as a standard method to evaluate the quality of distributed representations of sentences in past work (Kiros et al., 2015; Hill et al., 2016a; Kenter et al., 2016) .", |
| "cite_spans": [ |
| { |
| "start": 263, |
| "end": 283, |
| "text": "(Kiros et al., 2015;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 284, |
| "end": 303, |
| "text": "Hill et al., 2016a;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 304, |
| "end": 324, |
| "text": "Kenter et al., 2016)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic Textual Similarity", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Our experimental setup follows that of a previously published experiment (Hill et al., 2016a) . We use two standard datasets: (1) the STS 2014 dataset (Agirre et al., 2014) consisting of 3,750 sentence pairs and human ratings from six different sources (e.g., newswire, web forums, dictionary glosses), and (2) the SICK dataset (Marelli et al., 2014) consisting of 10,000 pairs of sentences and human ratings. In both datasets, the ratings take values between 1 and 5, where a rating of 1 indicates that the sentence pair is not related, and a rating of 5 means that they are highly related. All sentence pairs except the 500 SICK trial pairs were used for our experiments.", |
| "cite_spans": [ |
| { |
| "start": 73, |
| "end": 93, |
| "text": "(Hill et al., 2016a)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 151, |
| "end": 172, |
| "text": "(Agirre et al., 2014)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 328, |
| "end": 350, |
| "text": "(Marelli et al., 2014)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Setup", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "We train our model by experimenting with both paragraphs and sentences. Further, we introduce another training setting (denoted by fixed NTEE), where the parameters in the word representations and the entity representations are fixed throughout the training.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Setup", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "We compute the cosine distance between the vectors of the two sentences in each sentence pair (de-rived using Eq. (2)) and measure the Pearson's r and Spearman's p correlations between these distances and the gold-standard human ratings. Additionally, we use Pearson's r as our primal score.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Setup", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "For baselines for this experiment, we selected the following four recent state-of-the-art models. Brief descriptions of these models are as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "\u2022 Word2vec (Mikolov et al., 2013a; Mikolov et al., 2013b ) is a popular word embedding model. We compute a sentence representation by element-wise addition of the vectors of its words (Mitchell and Lapata, 2008) . We add its skip-gram and CBOW models to our baselines. We train the model with the hyper-parameters and the Wikipedia corpus explained in Section 2.2. Thus, the skip-gram model is equivalent to the pre-trained representations used in our model. Furthermore, in order to conduct a fair comparison between the skip-gram model and our model, we also add skip-gram (plain), which is a skip-gram model trained using a different corpus. In particular, the corpus is augmented using the texts in DBpedia abstract corpus 7 , and its entity annotations are treated as regular text phrases (not replaced to their unique identifiers).", |
| "cite_spans": [ |
| { |
| "start": 11, |
| "end": 34, |
| "text": "(Mikolov et al., 2013a;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 35, |
| "end": 56, |
| "text": "Mikolov et al., 2013b", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 184, |
| "end": 211, |
| "text": "(Mitchell and Lapata, 2008)", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "\u2022 Skip-thought (Kiros et al., 2015) is a model that is trained to predict adjacent sentences given each sentence in a corpus. Sentences are encoded using a recurrent neural network (RNN) with gated recurrent units (GRU).", |
| "cite_spans": [ |
| { |
| "start": 15, |
| "end": 35, |
| "text": "(Kiros et al., 2015)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "\u2022 Siamese CBOW (Kenter et al., 2016) is a model that aims to predict sentences occurring next to each other in a corpus. A sentence representation is derived using a vector average of words in the sentence.", |
| "cite_spans": [ |
| { |
| "start": 15, |
| "end": 36, |
| "text": "(Kenter et al., 2016)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "We obtain a score of a sentence pair by using the cosine distance between the sentence representations of the pair. Table 2 shows our experimental results with the baseline methods. We obtained the scores of Skip-thought from Hill et al. (2016a) and those of Siamese CBOW from Kenter et al. (2016) .", |
| "cite_spans": [ |
| { |
| "start": 226, |
| "end": 245, |
| "text": "Hill et al. (2016a)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 277, |
| "end": 297, |
| "text": "Kenter et al. (2016)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 116, |
| "end": 123, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "Our NTEE models were able to outperform the state-of-the-art models in all datasets in terms of Pearson's r. Moreover, our fixed NTEE models outperformed the NTEE models in several datasets and the skip-gram models in all datasets. Further, our model trained with sentences consistently outperformed the model trained with paragraphs. Additionally, the skip-gram models performed mostly similarly regardless of the difference of their corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.1.3" |
| }, |
| { |
| "text": "Note that, because we fix the word representations and the entity representations during the training of the fixed NTEE models, the difference between the fixed NTEE models and the skip-gram model is merely the presence of the learned fully connected layer. Because our model places a text representation and the representations of its relevant entities close to each other, the function of the layer can be recognized as an affine transformation from the word-based text representation to the entity-based text representation. We consider that the reason why the fixed NTEE model performed well among datasets is that the entity-based text representations are more semantic (less syntactic) and contain less noise than the word-based text representations, thus are much more suitable for addressing this task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.1.3" |
| }, |
| { |
| "text": "Entity Linking (EL) (Cucerzan, 2007; Mihalcea and Csomai, 2007; Milne and Witten, 2008; Ratinov et al., 2011; Hajishirzi et al., 2013; Ling et al., 2015) is the task of resolving ambiguous mentions of entities to their referent entities in KB. EL has recently received considerable attention because of its effectiveness in various NLP tasks such as information extraction and semantic search. The task is challenging because of the ambiguity in the meaning of entity mentions (e.g., \"Washington\" can refer to the state, the capital of the US, the first US president George Washington, and so forth).", |
| "cite_spans": [ |
| { |
| "start": 20, |
| "end": 36, |
| "text": "(Cucerzan, 2007;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 37, |
| "end": 63, |
| "text": "Mihalcea and Csomai, 2007;", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 64, |
| "end": 87, |
| "text": "Milne and Witten, 2008;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 88, |
| "end": 109, |
| "text": "Ratinov et al., 2011;", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 110, |
| "end": 134, |
| "text": "Hajishirzi et al., 2013;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 135, |
| "end": 153, |
| "text": "Ling et al., 2015)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Entity Linking", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The key to improve the performance of EL is to accurately model the semantic context of entity mentions. Because our model learns the likelihood of an entity appearance in a given text, it can naturally be used for modeling the context of EL.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Entity Linking", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Our experimental setup follows the setup described in past work (Chisholm and Hachey, 2015; He et al., 2013; Yamada et al., 2016) . We use two standard datasets: the CoNLL dataset and the TAC 2010 dataset. The CoNLL dataset, which was proposed in Hoffart et al. (2011) , includes training, development, and test sets consisting of 946, 216, and 231 documents, respectively. We use the training set to train our EL method, and the test set for measuring the performance of our method. We report the standard micro-(aggregates over all mentions) and macro-(aggregates over all documents) accuracies of the top-ranked candidate entities.", |
| "cite_spans": [ |
| { |
| "start": 64, |
| "end": 91, |
| "text": "(Chisholm and Hachey, 2015;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 92, |
| "end": 108, |
| "text": "He et al., 2013;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 109, |
| "end": 129, |
| "text": "Yamada et al., 2016)", |
| "ref_id": "BIBREF52" |
| }, |
| { |
| "start": 247, |
| "end": 268, |
| "text": "Hoffart et al. (2011)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Setup", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "The TAC 2010 dataset is another dataset constructed for the Text Analysis Conference (TAC) 8 (Ji et al., 2010) . The dataset comprises training and test sets containing 1,043 and 1,013 documents, respectively. We use mentions only with a valid entry in the KB, and report the micro-accuracy score of the top-ranked candidate entities. We evaluate our method on 1,020 mentions contained in the test set. Further, we randomly select 10% of the documents from the training set, and use these documents as a development set.", |
| "cite_spans": [ |
| { |
| "start": 93, |
| "end": 110, |
| "text": "(Ji et al., 2010)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Setup", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "Additionally, we collected two measures that have frequently been used in past EL work: entity popularity and prior probability. The entity popularity of an entity e is defined as log(|A e, * | + 1), where A e, * is the set of KB anchors that point to e. The prior probability of mention m referring to entity e is defined as |A e,m |/|A * ,m |, where A * ,m represents all KB anchors with the same surface as m, and A e,m is a subset of A * ,m that points to e. These two measures were collected directly from the same Wikipedia dump described in Section 2.2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Setup", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "Following past work, we address the EL task by solving two sub-tasks: candidate generation and mention disambiguation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Method", |
| "sec_num": "3.2.2" |
| }, |
| { |
| "text": "Candidate Generation In candidate generation, candidates of referent entities are generated for each mention. We use the candidate generation method proposed in Yamada et al. (2016) for the sake of compatibility with their state-of-the-art results. In particular, we use a public dataset proposed in Pershina et al. (2015) for the CoNLL dataset. For the TAC 2010 dataset, we use a dictionary that is directly built from the Wikipedia dump explained in Section 2.2. We retrieved possible mention surfaces of an entity from (1) the title of the entity, (2) the title of another entity redirecting to the entity, and (3) the names of anchors that point to the entity. Furthermore, to improve the recall, we also tokenize the title of each entity and treat resulted tokens as possible mention surfaces of the corresponding entity. We sort the entity candidates according to their entity popularities, and retain the top 100 candidates for computational efficiency. The recall of the can-didate generation was 99.9% and 94.6% on the test sets of the CoNLL and TAC 2010 datasets, respectively.", |
| "cite_spans": [ |
| { |
| "start": 161, |
| "end": 181, |
| "text": "Yamada et al. (2016)", |
| "ref_id": "BIBREF52" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Method", |
| "sec_num": "3.2.2" |
| }, |
| { |
| "text": "We address the mention disambiguation task using a multi-layer perceptron (MLP) with a single hidden layer. Figure 1 shows the architecture of our neural network model. The model selects an entity from among the entity candidates for each mention m in a document t. For each entity candidate e, we input the vector of the entity v e 9 , the vector of the document v t (computed with Eq. (2)), the dot product of v e and v t 10,11 , and the small number of features for EL described below. On top of these features, we stack a hidden layer with nonlinearity using rectified linear units (ReLU) and dropout. We also add an output layer onto the hidden layer and select the most relevant entity using softmax over the entity candidates.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 108, |
| "end": 116, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Mention Disambiguation", |
| "sec_num": null |
| }, |
| { |
| "text": "Similar to past work (Chisholm and Hachey, 2015; Yamada et al., 2016) , we include a small number of features in our model. First, we use the following three standard EL features: the entity popularity of e, the prior probability of m referring to e, and the maximum prior probability of e of all mentions in t. In addition, we optionally add features representing string similarities between the title of e and the surface of m (Meij et al., 2012; Yamada et al., 2016) . These similarities include whether the title of e exactly equals or contains the surface of m, and whether the title of e starts or ends with the surface of m.", |
| "cite_spans": [ |
| { |
| "start": 21, |
| "end": 48, |
| "text": "(Chisholm and Hachey, 2015;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 49, |
| "end": 69, |
| "text": "Yamada et al., 2016)", |
| "ref_id": "BIBREF52" |
| }, |
| { |
| "start": 429, |
| "end": 448, |
| "text": "(Meij et al., 2012;", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 449, |
| "end": 469, |
| "text": "Yamada et al., 2016)", |
| "ref_id": "BIBREF52" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Mention Disambiguation", |
| "sec_num": null |
| }, |
| { |
| "text": "We tuned the following two hyper-parameters using the micro-accuracy on the development set of each dataset: the number of units in the hidden layer and the dropout probability. The results are listed in Table 3 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 204, |
| "end": 211, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Mention Disambiguation", |
| "sec_num": null |
| }, |
| { |
| "text": "Further, we trained the model by using stochastic gradient descent (SGD). The learning rate was controlled by RMSprop, and the mini-batch size was set to 100. We also used the micro-accuracy on the development set to locate the best epoch for testing. We tested the NTEE model and the fixed NTEE model to initialize the parameters of representations v t and v e . Furthermore, we also tested two simple methods using the pre-trained representations (i.e., skip-gram). The first method is that the representations of words and entities are initialized using the pre-trained representations presented in Section 2.2, and the other parameters are initialized randomly (denoted by SG-proj). The second method is the same method as in SG-proj except the training corpus of the pre-trained representations is augmented using the DBpedia abstract corpus (denoted by SGproj-dbp). 12 Regarding the NTEE and the fixed NTEE models, sentences (rather than paragraphs) were used to train the proposed representations because of the superior performance of this approach on both the CoNLL and TAC 2010 datasets. Further, we did not update our representations of words (v w ) and entities (v e ) in the training of our EL method, because updating them did not generally improve the performance. Additionally, we used a vector filled with zeros as representations of entities that were not contained in our vocabulary.", |
| "cite_spans": [ |
| { |
| "start": 872, |
| "end": 874, |
| "text": "12", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Mention Disambiguation", |
| "sec_num": null |
| }, |
| { |
| "text": "We adopt the following six recent state-of-the-art EL methods as our baselines:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.2.3" |
| }, |
| { |
| "text": "\u2022 Hoffart (Hoffart et al., 2011) 5,000 0.1 Table 3 : Hyper-parameters used for EL and QA tasks. hidden units is the number of units in the hidden layers, and dropout is the dropout probability.", |
| "cite_spans": [ |
| { |
| "start": 10, |
| "end": 32, |
| "text": "(Hoffart et al., 2011)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 43, |
| "end": 50, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.2.3" |
| }, |
| { |
| "text": "\u2022 He (He et al., 2013) proposed a method for learning the representations of mention contexts and entities from KB using the stacked denoising auto-encoders. These representations were then used to address EL.", |
| "cite_spans": [ |
| { |
| "start": 5, |
| "end": 22, |
| "text": "(He et al., 2013)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.2.3" |
| }, |
| { |
| "text": "\u2022 Chisholm (Chisholm and Hachey, 2015) used a support vector machine (SVM) with various features derived from KB and a Wikilinks dataset (Singh et al., 2012 ).", |
| "cite_spans": [ |
| { |
| "start": 137, |
| "end": 156, |
| "text": "(Singh et al., 2012", |
| "ref_id": "BIBREF44" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.2.3" |
| }, |
| { |
| "text": "\u2022 Pershina (Pershina et al., 2015) improved EL by modeling coherence using the personalized page rank algorithm.", |
| "cite_spans": [ |
| { |
| "start": 11, |
| "end": 34, |
| "text": "(Pershina et al., 2015)", |
| "ref_id": "BIBREF40" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.2.3" |
| }, |
| { |
| "text": "\u2022 Globerson (Globerson et al., 2016 ) improved the coherence model for EL by introducing an attention mechanism in order to focus only on strong relations of entities.", |
| "cite_spans": [ |
| { |
| "start": 2, |
| "end": 35, |
| "text": "Globerson (Globerson et al., 2016", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.2.3" |
| }, |
| { |
| "text": "\u2022 Yamada (Yamada et al., 2016) proposed a model for learning the joint distributed representations of words and KB entities from KB, and addressed EL using context models based on the representations. Table 4 compares the results of our method with those obtained with the state-of-the-art methods. Our method achieved strong results on both the CoNLL and the TAC 2010 datasets. In particular, the NTEE model clearly outperformed the other proposed models. We also tested the performance of the NTEE model without using the string similarity features (strsim) and found that these features also contributed to the performance.", |
| "cite_spans": [ |
| { |
| "start": 9, |
| "end": 30, |
| "text": "(Yamada et al., 2016)", |
| "ref_id": "BIBREF52" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 201, |
| "end": 208, |
| "text": "Table 4", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.2.3" |
| }, |
| { |
| "text": "Furthermore, our method successfully outperformed all the recent strong state-of-the-art methods on both datasets. This is remarkable because most state-of-the-art EL methods, including all baseline methods except that of He, adopt global approaches, where all entity mentions in a document are simultaneously disambiguated based on coherence among disambiguation decisions. Our method depends only on the local (or textual) context available in the target document. Thus, the performance can likely be improved further by combining a global model with our local model as frequently observed in past work (Ratinov et al., 2011; Chisholm and Hachey, 2015; Yamada et al., 2016) .", |
| "cite_spans": [ |
| { |
| "start": 605, |
| "end": 627, |
| "text": "(Ratinov et al., 2011;", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 628, |
| "end": 654, |
| "text": "Chisholm and Hachey, 2015;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 655, |
| "end": 675, |
| "text": "Yamada et al., 2016)", |
| "ref_id": "BIBREF52" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.2.4" |
| }, |
| { |
| "text": "We also conducted a brief error analysis using the NTEE model and the test set of the CoNLL dataset by randomly inspecting 200 errors. As a result, 22% of the errors were mentions of which the referent entities were not contained in our vocabulary. In this case, our method could not incorporate any contextual information, thus likely resulting in disambiguation errors. The other major types of errors were the mentions of location names. The dataset contains many location names (e.g., Japan) referring to sports team entities (e.g., Japan national football team). It appeared that our method neglected to distinguish whether a location name refers to the location itself or a sports team. In particular, our method often wrongly resolved these mentions referring to sports team entities into the corresponding location entities and vice versa. They accounted for 20.5% and 14.5% out of the total number of errors, respectively. Moreover, we observed several difficult cases such as selecting Hindu instead of Hindu nationalism, Christian instead of Catholicism, New York City instead of New York, and so forth.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.2.4" |
| }, |
| { |
| "text": "Question Answering (QA) has been one of the central problems in NLP research for the last few decades. Factoid QA is one of the typical types of QA that aims to predict an entity (e.g., events, authors, and actors) that is discussed in a given question. Quiz bowl is a popular trivia quiz game in which players are asked questions consisting of 4-6 sentence questions describing entities. The dataset of the quiz bowl has been frequently used for evaluating factoid QA methods in recent literature on QA (Iyyer et al., 2014; Iyyer et al., 2015; Xu and Li, 2016) .", |
| "cite_spans": [ |
| { |
| "start": 504, |
| "end": 524, |
| "text": "(Iyyer et al., 2014;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 525, |
| "end": 544, |
| "text": "Iyyer et al., 2015;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 545, |
| "end": 561, |
| "text": "Xu and Li, 2016)", |
| "ref_id": "BIBREF51" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Factoid Question Answering", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "In this section, we demonstrate that our proposed representations can be effectively used as background knowledge for the QA task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Factoid Question Answering", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "We followed an existing method (Xu and Li, 2016) for our experimental setup. We used the public quiz bowl dataset proposed in Iyyer et al. (2014) . 13 Following past work (Iyyer et al., 2014; Iyyer et al., 2015; Xu and Li, 2016) , we only used questions belonging to the history and literature categories, and only used answers that appeared at least six times. For questions referring to the same answer, we sampled 20% of each for the development set and test sets, and the remaining 60% for the training set. As a result, we obtained 1,535 training, 511 development, and 511 test questions for history, and 2,524 training, 840 development, and 840 test questions for literature. The number of possible answers was 303 and 424 in the history and literature categories, respectively.", |
| "cite_spans": [ |
| { |
| "start": 31, |
| "end": 48, |
| "text": "(Xu and Li, 2016)", |
| "ref_id": "BIBREF51" |
| }, |
| { |
| "start": 126, |
| "end": 145, |
| "text": "Iyyer et al. (2014)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 148, |
| "end": 150, |
| "text": "13", |
| "ref_id": null |
| }, |
| { |
| "start": 171, |
| "end": 191, |
| "text": "(Iyyer et al., 2014;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 192, |
| "end": 211, |
| "text": "Iyyer et al., 2015;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 212, |
| "end": 228, |
| "text": "Xu and Li, 2016)", |
| "ref_id": "BIBREF51" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Setup", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "Following past work (Iyyer et al., 2014; Iyyer et al., 2015; Xu and Li, 2016 ), we address this task as a classification problem that selects the most relevant answer from the possible answers observed in the dataset. We adopt the same neural network architecture described in Section 3.2.2 (see Figure 1) . We use the following three features: the vector of the entity v e 14 , the vector of the question v t (computed using Eq. (2)), and the dot product of v e and v t . Note that we do not include other features in this task.", |
| "cite_spans": [ |
| { |
| "start": 20, |
| "end": 40, |
| "text": "(Iyyer et al., 2014;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 41, |
| "end": 60, |
| "text": "Iyyer et al., 2015;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 61, |
| "end": 76, |
| "text": "Xu and Li, 2016", |
| "ref_id": "BIBREF51" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 296, |
| "end": 305, |
| "text": "Figure 1)", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Our Method", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "The hyper-parameters used in our model (i.e., the number of units in the hidden layer and the dropout probability) are shown in Table 3 . We tuned these parameters using the development set of each dataset.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 128, |
| "end": 135, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Our Method", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "Unlike the EL task, we updated all parameters including representations of words and entities for training our QA method. We used stochastic gradient descent (SGD) to train the model. The minibatch size was fixed at 100, and the learning rate was controlled by RMSprop. We used the accuracy on the development set of each dataset to detect the best epoch. Similar to the EL task, we tested the four models to initialize the representations v t and v e , i.e., the NTEE, the fixed NTEE, the SG-proj, and the SGproj-dbp models. Further, the representations of the NTEE model and the fixed NTEE model were those that were trained with the sentences because of their overall superior accuracy compared to those trained with paragraphs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Method", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "We use two types of baselines: two conventional bag-of-words (BOW) models and two state-of-theart neural network models. The details of these models are as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.3.3" |
| }, |
| { |
| "text": "\u2022 BOW (Iyyer et al., 2014 ) is a conventional approach using a logistic regression (LR) classifier trained with binary BOW features to predict the correct answer.", |
| "cite_spans": [ |
| { |
| "start": 6, |
| "end": 25, |
| "text": "(Iyyer et al., 2014", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.3.3" |
| }, |
| { |
| "text": "\u2022 BOW-DT (Iyyer et al., 2014) is based on the BOW baseline augmented with the feature set with dependency relation indicators.", |
| "cite_spans": [ |
| { |
| "start": 9, |
| "end": 29, |
| "text": "(Iyyer et al., 2014)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.3.3" |
| }, |
| { |
| "text": "\u2022 QANTA (Iyyer et al., 2014) is an approach based on a recursive neural network to derive the distributed representations of questions. The method also uses the LR classifier with the derived representations as features.", |
| "cite_spans": [ |
| { |
| "start": 8, |
| "end": 28, |
| "text": "(Iyyer et al., 2014)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.3.3" |
| }, |
| { |
| "text": "\u2022 FTS-BRNN (Xu and Li, 2016) is based on the bidirectional recurrent neural network (RNN) with gated recurrent units (GRU). Similar to QANTA, the method adopts the LR classifier with the derived representations as features. Table 5 shows the results of our methods compared with those of the baseline methods. The results of BOW, BOW-DT, and QANTA were obtained from Xu and Li (2016) . We also include the result reported in Iyyer et al. (2014) (denoted by QANTAfull), which used a significantly larger dataset than ours for training and testing.", |
| "cite_spans": [ |
| { |
| "start": 11, |
| "end": 28, |
| "text": "(Xu and Li, 2016)", |
| "ref_id": "BIBREF51" |
| }, |
| { |
| "start": 367, |
| "end": 383, |
| "text": "Xu and Li (2016)", |
| "ref_id": "BIBREF51" |
| }, |
| { |
| "start": 425, |
| "end": 444, |
| "text": "Iyyer et al. (2014)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 224, |
| "end": 231, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.3.3" |
| }, |
| { |
| "text": "The experimental results show that our NTEE model achieved the best performance compared to the other proposed models and all the baseline methods on both the history and the literature datasets. Table 5 : Accuracies of the proposed method and the state-of-the-art methods for the factoid QA task.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 196, |
| "end": 203, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.3.4" |
| }, |
| { |
| "text": "In particular, despite the simplicity of the neural network architecture of our method compared to the state-of-the-art methods (i.e., QANTA and FTS-BRNN), our method clearly outperformed these methods. This demonstrates the effectiveness of our proposed representations as background knowledge for the QA task. We also conducted a brief error analysis using the test set of the history dataset. Our observations indicated that our method mostly performed perfect in terms of predicting the types of target answers (e.g., locations, events, and people). However, our method erred in delicate cases such as predicting Henry II of England instead of Henry I of England, and Syracuse, Sicily instead of Sicily.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.3.4" |
| }, |
| { |
| "text": "In order to investigate what happens inside our model, we conducted a qualitative analysis using our proposed representations trained with sentences. We first inspected the word representations of our model and our pre-trained representations (i.e., the skip-gram model) by computing the top five similar words of five words (i.e., her, dry, spanish, tennis, moon) using cosine similarity. The results are presented in Table 6 . Interestingly, our model is somewhat more specific than the skip-gram model. For example, there is only one word she whose cosine similarity to the word her is more than 0.5 in our model, whereas all the corresponding similar words in the skip-gram model (i.e., she, his, herself, him, and mother) satisfy that condition. We observe a similar trend for the similar words of dry. Furthermore, all the words similar to tennis are strictly re-lated to the sport itself in our model, whereas the corresponding similar words of the skip-gram model contain broader words such as ball sports (e.g., badminton and volleyball). A similar trend can be observed for the similar words of spanish and moon.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 419, |
| "end": 426, |
| "text": "Table 6", |
| "ref_id": "TABREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Qualitative Analysis", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Similarly, we also compared our entity representations with those of the pre-trained representations by computing the top five similar entities of six entities (i.e., Europe, Golf, Tea, Smartphone, Scarlett Johansson, and The Lord of the Rings) with respect to cosine similarity. Table 7 contains the results. For the entities Europe and Golf, we observe similar trends to our word representations. Particularly, in our model, the most similar entities of Europe and Golf are Eastern Europe and Golf course, respectively, whereas those of the skip-gram model are Asia and Tennis, respectively. However, the similar entities of most entities (e.g., Tea, Smartphone, Scarlett Johansson and The Lord of the Rings) appear to be similar between our model and the skipgram model.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 280, |
| "end": 287, |
| "text": "Table 7", |
| "ref_id": "TABREF9" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Qualitative Analysis", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Various neural network models that learn distributed representations of arbitrary-length texts (e.g., paragraphs and sentences) have recently been proposed. These models aimed to produce general-purpose text representations that can be used with ease in various downstream NLP tasks. Although most of these models learn text representations from an unstructured text corpus (Le and Mikolov, 2014; Kiros et al., 2015; Kenter et al., 2016) , there have also been proposed models that learn text representations by leveraging structured linguistic resources. For instance, Wieting et al. (2016) trained their model using a large number of noisy phrase pairs retrieved from the Paraphrase Database (PPDB) (Ganitkevitch et al., 2013) . Hill et al. (2016b) use several public dictionaries to train the model by mapping definition texts in a dictionary to representations of the words explained by these texts. To our knowledge, our work is the first work to learn generic text representations with the supervision of entity annotations.", |
| "cite_spans": [ |
| { |
| "start": 382, |
| "end": 396, |
| "text": "Mikolov, 2014;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 397, |
| "end": 416, |
| "text": "Kiros et al., 2015;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 417, |
| "end": 437, |
| "text": "Kenter et al., 2016)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 570, |
| "end": 591, |
| "text": "Wieting et al. (2016)", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 701, |
| "end": 728, |
| "text": "(Ganitkevitch et al., 2013)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 731, |
| "end": 750, |
| "text": "Hill et al. (2016b)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Several methods have also been proposed for extending the word embedding methods. For example, Levy and Goldberg (2014) proposed a method to train word embedding with dependency-based con- (Hu et al., 2015; . These models are typically based on the skip-gram model and directly model the semantic relatedness between KB entities. Our work differs from these studies because we aim to learn representations of arbitrary-length texts in addition to entities. Another related approach is the relational embedding (or knowledge embedding) (Bordes et al., 2013; Wang et al., 2014; Lin et al., 2015) , which encodes entities as continuous vectors and relations as some operations on the vector space, such as vector addition. These models typically learn representations from large KB graphs consisting of entities and relations. Similarly, the universal schema (Riedel et al., 2013; Toutanova et al., 2015; Verga et al., 2016) jointly learned continuous representations of KB relations, entities, and surface text patterns for the relation extraction task.", |
| "cite_spans": [ |
| { |
| "start": 95, |
| "end": 119, |
| "text": "Levy and Goldberg (2014)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 189, |
| "end": 206, |
| "text": "(Hu et al., 2015;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 535, |
| "end": 556, |
| "text": "(Bordes et al., 2013;", |
| "ref_id": null |
| }, |
| { |
| "start": 557, |
| "end": 575, |
| "text": "Wang et al., 2014;", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 576, |
| "end": 593, |
| "text": "Lin et al., 2015)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 856, |
| "end": 877, |
| "text": "(Riedel et al., 2013;", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 878, |
| "end": 901, |
| "text": "Toutanova et al., 2015;", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 902, |
| "end": 921, |
| "text": "Verga et al., 2016)", |
| "ref_id": "BIBREF48" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Finally, Yamada et al. (2016) recently proposed a method to jointly learn the embeddings of words and entities from Wikipedia using the skip-gram model and applied it to EL. Our method differs from their method in that their method does not directly model arbitrary-length texts (i.e., paragraphs and sentences), which we proved to be highly effective for various tasks in this paper. Moreover, we also showed that the joint embedding of texts and entities can be applied not only to EL but also for wider applications such as semantic textual similarity and factoid QA.", |
| "cite_spans": [ |
| { |
| "start": 9, |
| "end": 29, |
| "text": "Yamada et al. (2016)", |
| "ref_id": "BIBREF52" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In this paper, we presented a novel model capable of jointly learning distributed representations of texts and entities from a large number of entity annotations in Wikipedia. Our aim was to construct the proposed general-purpose model such that it enables practitioners to address various NLP tasks with ease. We achieved state-of-the-art results on three important NLP tasks (i.e., semantic textual similarity, entity linking, and factoid question answering), which clearly demonstrated the effectiveness of our model. Furthermore, the qualitative analysis showed that the characteristics of our learned representations apparently differ from those of the conventional word embedding model (i.e., the skip-gram model), which we plan to investigate in more detail in the future. Moreover, we make our code and trained models publicly available for future research.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Future work includes analyzing our model more extensively and exploring the effectiveness of our model in terms of other NLP tasks. We also aim to test more expressive neural network models (e.g., LSTM) to derive our text representations. Furthermore, we believe that one of the promising directions would be to incorporate the rich structural data of the KB such as relationships between entities, links between entities, and the hierarchical category structure of entities. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "https://github.com/studio-ousia/ntee", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Entity annotations in Wikipedia can be viewed as supervised data of relevant entities because Wikipedia instructs its contributors to create annotations only where they are relevant in its manual: https://en.wikipedia.org/ wiki/Wikipedia:Manual_of_Style", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Transactions of the Association for Computational Linguistics, vol. 5, pp. 397-411, 2017. Action Editor: Kristina Toutanova .Submission batch: 12/2016; Revision batch: 3/2017; Published 11/2017. c 2017 Association for Computational Linguistics. Distributed under a CC-BY 4.0 license.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We use the open-source Apache OpenNLP to detect sentences.4 The Wikipedia dump was downloaded from Wikimedia Downloads: https://dumps.wikimedia.org/ 5 https://radimrehurek.com/gensim/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The corpus also includes annotations that are generated using heuristics. We did not use these pseudo-annotations and used only the entity annotations that were created by Wikipedia contributors.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We augment the corpus simply by appending the texts in DBpedia abstract corpus to the Wikipedia corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://www.nist.gov/tac/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
"text": "We normalized v_e to unit length because of its overall higher accuracy. 10 Note that the dot product represents the unnormalized likelihood that e appears in t (see Eq. (1)). 11 We also tested using the cosine similarity rather than the dot product, but it slightly degraded the performance in the EL task and the factoid QA task described below.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We augmented the corpus by simply concatenating the Wikipedia corpus and the DBpedia abstract corpus. Similar to the Wikipedia corpus, we replaced each entity annotation in the DBpedia abstract corpus by its unique identifier of the entity referred by the annotation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
"text": "The dataset was downloaded from https://cs.umd.edu/~miyyer/qblearn/. Note that the public dataset is significantly smaller than the one used in past work (Iyyer et al., 2014; Iyyer et al., 2015) because they also used a proprietary dataset in addition to the public dataset. 14 Similar to our EL method, we also normalize v_e to unit length because of its overall higher accuracy.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to thank the TACL editor Kristina Toutanova and the anonymous reviewers for helpful comments on an earlier draft of this paper.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "SemEval-2014 Task 10: Multilingual Semantic Textual Similarity", |
| "authors": [ |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "Carmen", |
| "middle": [], |
| "last": "Banea", |
| "suffix": "" |
| }, |
| { |
| "first": "Claire", |
| "middle": [], |
| "last": "Cardie", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Cer", |
| "suffix": "" |
| }, |
| { |
| "first": "Mona", |
| "middle": [], |
| "last": "Diab", |
| "suffix": "" |
| }, |
| { |
| "first": "Aitor", |
| "middle": [], |
| "last": "Gonzalez-Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "Weiwei", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Rada", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| }, |
| { |
| "first": "German", |
| "middle": [], |
| "last": "Rigau", |
| "suffix": "" |
| }, |
| { |
| "first": "Janyce", |
| "middle": [], |
| "last": "Wiebe", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 8th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "81--91", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eneko Agirre, Carmen Banea, Claire Cardie, Daniel Cer, Mona Diab, Aitor Gonzalez-Agirre, Weiwei Guo, Rada Mihalcea, German Rigau, and Janyce Wiebe. 2014. SemEval-2014 Task 10: Multilingual Seman- tic Textual Similarity. In Proceedings of the 8th In- ternational Workshop on Semantic Evaluation, pages 81-91.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Translating Embeddings for Modeling Multi-relational Data", |
| "authors": [], |
| "year": null, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "26", |
| "issue": "", |
| "pages": "2787--2795", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Translating Embeddings for Modeling Multi-relational Data. In Advances in Neural Information Processing Systems 26, pages 2787-2795.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "DBpedia Abstracts: A Large-Scale, Open, Multilingual NLP Training Corpus", |
| "authors": [ |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Br\u00fcmmer", |
| "suffix": "" |
| }, |
| { |
| "first": "Milan", |
| "middle": [], |
| "last": "Dojchinovski", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Hellmann", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Tenth International Conference on Language Resources and Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Martin Br\u00fcmmer, Milan Dojchinovski, and Sebastian Hellmann. 2016. DBpedia Abstracts: A Large-Scale, Open, Multilingual NLP Training Corpus. In Proceed- ings of the Tenth International Conference on Lan- guage Resources and Evaluation.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Entity Disambiguation with Web Links", |
| "authors": [ |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Chisholm", |
| "suffix": "" |
| }, |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Hachey", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "3", |
| "issue": "", |
| "pages": "145--156", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrew Chisholm and Ben Hachey. 2015. Entity Dis- ambiguation with Web Links. Transactions of the As- sociation for Computational Linguistics, 3:145-156.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Large-Scale Named Entity Disambiguation Based on Wikipedia Data", |
| "authors": [ |
| { |
"first": "Silviu",
"middle": [],
"last": "Cucerzan",
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "708--716", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Silviu Cucerzan. 2007. Large-Scale Named Entity Dis- ambiguation Based on Wikipedia Data. In Proceed- ings of the 2007 Joint Conference on Empirical Meth- ods in Natural Language Processing and Computa- tional Natural Language Learning, pages 708-716.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Entity Disambiguation by Knowledge and Text Jointly Embedding", |
| "authors": [ |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of The 20th SIGNLL Conference on Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "260--269", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ming Li. 2016. Entity Disambiguation by Knowledge and Text Jointly Embedding. In Proceedings of The 20th SIGNLL Conference on Computational Natural Language Learning, pages 260-269.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Computing Semantic Relatedness Using Wikipedia-Based Explicit Semantic Analysis", |
| "authors": [ |
| { |
| "first": "Evgeniy", |
| "middle": [], |
| "last": "Gabrilovich", |
| "suffix": "" |
| }, |
| { |
| "first": "Shaul", |
| "middle": [], |
| "last": "Markovitch", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "International Joint Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "1606--1611", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Evgeniy Gabrilovich and Shaul Markovitch. 2007. Com- puting Semantic Relatedness Using Wikipedia-Based Explicit Semantic Analysis. In International Joint Conference on Artificial Intelligence, pages 1606- 1611.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "PPDB: The Paraphrase Database", |
| "authors": [ |
| { |
| "first": "Juri", |
| "middle": [], |
| "last": "Ganitkevitch", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "758--764", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Juri Ganitkevitch, Benjamin Van Durme, and Chris Callison-Burch. 2013. PPDB: The Paraphrase Database. In Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, pages 758-764.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Collective Entity Resolution with Multi-Focal Attention", |
| "authors": [ |
| { |
| "first": "Nevena", |
| "middle": [], |
| "last": "Amir Globerson", |
| "suffix": "" |
| }, |
| { |
| "first": "Soumen", |
| "middle": [], |
| "last": "Lazic", |
| "suffix": "" |
| }, |
| { |
| "first": "Amarnag", |
| "middle": [], |
| "last": "Chakrabarti", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Subramanya", |
| "suffix": "" |
| }, |
| { |
| "first": "Fernando", |
| "middle": [], |
| "last": "Ringaard", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Pereira", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amir Globerson, Nevena Lazic, Soumen Chakrabarti, Amarnag Subramanya, Michael Ringaard, and Fer- nando Pereira. 2016. Collective Entity Resolution with Multi-Focal Attention. In Proceedings of the 54th", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Annual Meeting of the Association for Computational Linguistics", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "1", |
| "issue": "", |
| "pages": "621--631", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 621-631.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Joint Coreference Resolution and Named-Entity Linking with Multi-Pass Sieves", |
| "authors": [ |
| { |
| "first": "Hannaneh", |
| "middle": [], |
| "last": "Hajishirzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Leila", |
| "middle": [], |
| "last": "Zilles", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Daniel", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Weld", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "289--299", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hannaneh Hajishirzi, Leila Zilles, Daniel S Weld, and Luke Zettlemoyer. 2013. Joint Coreference Res- olution and Named-Entity Linking with Multi-Pass Sieves. In Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing, pages 289-299.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Learning Entity Representation for Entity Disambiguation", |
| "authors": [ |
| { |
| "first": "Zhengyan", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Shujie", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mu", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Longkai", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Houfeng", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "30--34", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhengyan He, Shujie Liu, Mu Li, Ming Zhou, Longkai Zhang, and Houfeng Wang. 2013. Learning Entity Representation for Entity Disambiguation. In Pro- ceedings of the 51st Annual Meeting of the Associa- tion for Computational Linguistics (Volume 2: Short Papers), pages 30-34.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Learning Distributed Representations of Sentences from Unlabelled Data", |
| "authors": [ |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Hill", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "1367--1377", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Felix Hill, Kyunghyun Cho, and Anna Korhonen. 2016a. Learning Distributed Representations of Sentences from Unlabelled Data. In Proceedings of the 2016 Conference of the North American Chapter of the As- sociation for Computational Linguistics: Human Lan- guage Technologies, pages 1367-1377.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Learning to Understand Phrases by Embedding the Dictionary", |
| "authors": [ |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Hill", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "4", |
| "issue": "", |
| "pages": "17--30", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Felix Hill, Kyunghyun Cho, Anna Korhonen, and Yoshua Bengio. 2016b. Learning to Understand Phrases by Embedding the Dictionary. Transactions of the Asso- ciation for Computational Linguistics, 4:17-30.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Robust Disambiguation of Named Entities in Text", |
| "authors": [ |
| { |
| "first": "Johannes", |
| "middle": [], |
| "last": "Hoffart", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohamed", |
| "middle": [ |
| "Amir" |
| ], |
| "last": "Yosef", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilaria", |
| "middle": [], |
| "last": "Bordino", |
| "suffix": "" |
| }, |
| { |
| "first": "Hagen", |
| "middle": [], |
| "last": "F\u00fcrstenau", |
| "suffix": "" |
| }, |
| { |
| "first": "Manfred", |
| "middle": [], |
| "last": "Pinkal", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Spaniol", |
| "suffix": "" |
| }, |
| { |
| "first": "Bilyana", |
| "middle": [], |
| "last": "Taneva", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Thater", |
| "suffix": "" |
| }, |
| { |
| "first": "Gerhard", |
| "middle": [], |
| "last": "Weikum", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "782--792", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Johannes Hoffart, Mohamed Amir Yosef, Ilaria Bordino, Hagen F\u00fcrstenau, Manfred Pinkal, Marc Spaniol, Bilyana Taneva, Stefan Thater, and Gerhard Weikum. 2011. Robust Disambiguation of Named Entities in Text. In Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing, pages 782-792.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Entity Hierarchy Embedding", |
| "authors": [ |
| { |
| "first": "Zhiting", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Poyao", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuntian", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| }, |
| { |
| "first": "Yingkai", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Xing", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "1292--1300", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhiting Hu, Poyao Huang, Yuntian Deng, Yingkai Gao, and Eric Xing. 2015. Entity Hierarchy Embedding. In Proceedings of the 53rd Annual Meeting of the Associ- ation for Computational Linguistics and the 7th Inter- national Joint Conference on Natural Language Pro- cessing (Volume 1: Long Papers), pages 1292-1300.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "A Neural Network for Factoid Question Answering over Paragraphs", |
| "authors": [ |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Jordan", |
| "middle": [], |
| "last": "Boyd-Graber", |
| "suffix": "" |
| }, |
| { |
| "first": "Leonardo", |
| "middle": [], |
| "last": "Claudino", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Hal", |
| "middle": [], |
| "last": "Daum\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "Iii", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "633--644", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohit Iyyer, Jordan Boyd-Graber, Leonardo Claudino, Richard Socher, and Hal Daum\u00e9 III. 2014. A Neural Network for Factoid Question Answering over Para- graphs. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing, pages 633-644.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Deep Unordered Composition Rivals Syntactic Methods for Text Classification", |
| "authors": [ |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Varun", |
| "middle": [], |
| "last": "Manjunatha", |
| "suffix": "" |
| }, |
| { |
| "first": "Jordan", |
| "middle": [], |
| "last": "Boyd-Graber", |
| "suffix": "" |
| }, |
| { |
| "first": "Hal", |
| "middle": [], |
| "last": "Daum\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "Iii", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "1681--1691", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohit Iyyer, Varun Manjunatha, Jordan Boyd-Graber, and Hal Daum\u00e9 III. 2015. Deep Unordered Com- position Rivals Syntactic Methods for Text Classifica- tion. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 1681- 1691.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Knowledge Base Population Track", |
| "authors": [], |
| "year": null, |
| "venue": "Proceeding of Text Analytics Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Knowledge Base Population Track. In Proceeding of Text Analytics Conference.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Siamese CBOW: Optimizing Word Embeddings for Sentence Representations", |
| "authors": [ |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Kenter", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexey", |
| "middle": [], |
| "last": "Borisov", |
| "suffix": "" |
| }, |
| { |
| "first": "Maarten", |
| "middle": [], |
| "last": "De Rijke", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "941--951", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom Kenter, Alexey Borisov, and Maarten de Rijke. 2016. Siamese CBOW: Optimizing Word Embed- dings for Sentence Representations. In Proceedings of the 54th Annual Meeting of the Association for Com- putational Linguistics (Volume 1: Long Papers), pages 941-951.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Skip-Thought Vectors", |
| "authors": [ |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Kiros", |
| "suffix": "" |
| }, |
| { |
| "first": "Yukun", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Ruslan", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "Raquel", |
| "middle": [], |
| "last": "Zemel", |
| "suffix": "" |
| }, |
| { |
| "first": "Antonio", |
| "middle": [], |
| "last": "Urtasun", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanja", |
| "middle": [], |
| "last": "Torralba", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Fidler", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "28", |
| "issue": "", |
| "pages": "3294--3302", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryan Kiros, Yukun Zhu, Ruslan R Salakhutdinov, Richard Zemel, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. 2015. Skip-Thought Vectors. In Ad- vances in Neural Information Processing Systems 28, pages 3294-3302.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Distributed Representations of Sentences and Documents", |
| "authors": [ |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Quoc", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 31st International Conference on Machine Learning", |
| "volume": "32", |
| "issue": "", |
| "pages": "1188--1196", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Quoc V. Le and Tomas Mikolov. 2014. Distributed Rep- resentations of Sentences and Documents. In Proceed- ings of the 31st International Conference on Machine Learning (Volume 32), pages 1188-1196.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Dependency-Based Word Embeddings", |
| "authors": [ |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Omer Levy and Yoav Goldberg. 2014. Dependency- Based Word Embeddings. In Proceedings of the 52nd", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Annual Meeting of the Association for Computational Linguistics", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "2", |
| "issue": "", |
| "pages": "302--308", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 302-308.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "A Hierarchical Neural Autoencoder for Paragraphs and Documents", |
| "authors": [ |
| { |
| "first": "Jiwei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "1106--1115", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiwei Li, Thang Luong, and Dan Jurafsky. 2015. A Hierarchical Neural Autoencoder for Paragraphs and Documents. In Proceedings of the 53rd Annual Meet- ing of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 1106-1115.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Joint Embedding of Hierarchical Categories and Entities for Concept Categorization and Dataless Classification", |
| "authors": [ |
| { |
| "first": "Yuezhang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Ronghuo", |
| "middle": [], |
| "last": "Zheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Tian", |
| "middle": [], |
| "last": "Tian", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiting", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Rahul", |
| "middle": [], |
| "last": "Iyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Katia", |
| "middle": [], |
| "last": "Sycara", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 26th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "2678--2688", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuezhang Li, Ronghuo Zheng, Tian Tian, Zhiting Hu, Rahul Iyer, and Katia Sycara. 2016. Joint Embed- ding of Hierarchical Categories and Entities for Con- cept Categorization and Dataless Classification. In Proceedings of the 26th International Conference on Computational Linguistics, pages 2678-2688.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Learning Entity and Relation Embeddings for Knowledge Graph Completion", |
| "authors": [ |
| { |
| "first": "Yankai", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiyuan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Maosong", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xuan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 29th AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "2181--2187", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yankai Lin, Zhiyuan Liu, Maosong Sun, Yang Liu, and Xuan Zhu. 2015. Learning Entity and Relation Em- beddings for Knowledge Graph Completion. In Pro- ceedings of the 29th AAAI Conference on Artificial In- telligence, pages 2181-2187.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Design Challenges for Entity Linking", |
| "authors": [ |
| { |
| "first": "Xiao", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Sameer", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [ |
| "S" |
| ], |
| "last": "Weld", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "3", |
| "issue": "", |
| "pages": "315--328", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiao Ling, Sameer Singh, and Daniel S. Weld. 2015. Design Challenges for Entity Linking. Transactions of the Association for Computational Linguistics, 3:315- 328.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Multiplicative Representations for Unsupervised Semantic Role Induction", |
| "authors": [ |
| { |
| "first": "Yi", |
| "middle": [], |
| "last": "Luan", |
| "suffix": "" |
| }, |
| { |
| "first": "Yangfeng", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| }, |
| { |
| "first": "Hannaneh", |
| "middle": [], |
| "last": "Hajishirzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Boyang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "118--123", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yi Luan, Yangfeng Ji, Hannaneh Hajishirzi, and Boyang Li. 2016. Multiplicative Representations for Unsuper- vised Semantic Role Induction. In Proceedings of the 54th Annual Meeting of the Association for Compu- tational Linguistics (Volume 2: Short Papers), pages 118-123.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "A SICK Cure for the Evaluation of Compositional Distributional Semantic Models", |
| "authors": [ |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Marelli", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefano", |
| "middle": [], |
| "last": "Menini", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| }, |
| { |
| "first": "Luisa", |
| "middle": [], |
| "last": "Bentivogli", |
| "suffix": "" |
| }, |
| { |
| "first": "Raffaella", |
| "middle": [], |
| "last": "Bernardi", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Zamparelli", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the Ninth International Conference on Language Resources and Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "216--223", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marco Marelli, Stefano Menini, Marco Baroni, Luisa Bentivogli, Raffaella Bernardi, and Roberto Zampar- elli. 2014. A SICK Cure for the Evaluation of Compo- sitional Distributional Semantic Models. In Proceed- ings of the Ninth International Conference on Lan- guage Resources and Evaluation, pages 216-223.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Adding Semantics to Microblog Posts", |
| "authors": [ |
| { |
| "first": "Edgar", |
| "middle": [], |
| "last": "Meij", |
| "suffix": "" |
| }, |
| { |
| "first": "Wouter", |
| "middle": [], |
| "last": "Weerkamp", |
| "suffix": "" |
| }, |
| { |
| "first": "Maarten", |
| "middle": [], |
| "last": "De Rijke", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the Fifth ACM International Conference on Web Search and Data Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "563--572", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Edgar Meij, Wouter Weerkamp, and Maarten de Rijke. 2012. Adding Semantics to Microblog Posts. In Pro- ceedings of the Fifth ACM International Conference on Web Search and Data Mining, pages 563-572.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Wikify!: Linking Documents to Encyclopedic Knowledge", |
| "authors": [ |
| { |
| "first": "Rada", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| }, |
| { |
| "first": "Andras", |
| "middle": [], |
| "last": "Csomai", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the Sixteenth ACM Conference on Information and Knowledge Management", |
| "volume": "", |
| "issue": "", |
| "pages": "233--242", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rada Mihalcea and Andras Csomai. 2007. Wikify!: Linking Documents to Encyclopedic Knowledge. In Proceedings of the Sixteenth ACM Conference on In- formation and Knowledge Management, pages 233- 242.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Efficient Estimation of Word Representations in Vector Space", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "1--12", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Greg Corrado, Kai Chen, and Jeffrey Dean. 2013a. Efficient Estimation of Word Repre- sentations in Vector Space. In Proceedings of the In- ternational Conference on Learning Representations, pages 1-12.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Distributed Representations of Words and Phrases and their Compositionality", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [ |
| "S" |
| ], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "26", |
| "issue": "", |
| "pages": "3111--3119", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S. Cor- rado, and Jeff Dean. 2013b. Distributed Represen- tations of Words and Phrases and their Composition- ality. In Advances in Neural Information Processing Systems 26, pages 3111-3119.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Learning to Link with Wikipedia", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Milne", |
| "suffix": "" |
| }, |
| { |
| "first": "Ian", |
| "middle": [ |
| "H" |
| ], |
| "last": "Witten", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceeding of the 17th ACM Conference on Information and Knowledge Management", |
| "volume": "", |
| "issue": "", |
| "pages": "509--518", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Milne and Ian H. Witten. 2008. Learning to Link with Wikipedia. In Proceeding of the 17th ACM Con- ference on Information and Knowledge Management, pages 509-518.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Vector-based Models of Semantic Composition", |
| "authors": [ |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of ACL-08: HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "236--244", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeff Mitchell and Mirella Lapata. 2008. Vector-based Models of Semantic Composition. In Proceedings of ACL-08: HLT, pages 236-244.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "GloVe: Global Vectors for Word Representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D Manning. 2014. GloVe: Global Vectors for Word Representation. In Proceedings of the 2014 Confer- ence on Empirical Methods in Natural Language Pro- cessing, pages 1532-1543.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Personalized Page Rank for Named Entity Disambiguation", |
| "authors": [ |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Pershina", |
| "suffix": "" |
| }, |
| { |
| "first": "Yifan", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Ralph", |
| "middle": [], |
| "last": "Grishman", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "238--243", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maria Pershina, Yifan He, and Ralph Grishman. 2015. Personalized Page Rank for Named Entity Disam- biguation. In Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, pages 238-243.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Local and Global Algorithms for Disambiguation to Wikipedia", |
| "authors": [ |
| { |
| "first": "Lev", |
| "middle": [], |
| "last": "Ratinov", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 49th", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lev Ratinov, Dan Roth, Doug Downey, and Mike An- derson. 2011. Local and Global Algorithms for Dis- ambiguation to Wikipedia. In Proceedings of the 49th", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "1375--1384", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Annual Meeting of the Association for Computational Linguistics: Human Language Technologies, pages 1375-1384.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Relation Extraction with Matrix Factorization and Universal Schemas", |
| "authors": [ |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Riedel", |
| "suffix": "" |
| }, |
| { |
| "first": "Limin", |
| "middle": [], |
| "last": "Yao", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin M", |
| "middle": [], |
| "last": "Marlin", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "74--84", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sebastian Riedel, Limin Yao, Andrew McCallum, and Benjamin M Marlin. 2013. Relation Extraction with Matrix Factorization and Universal Schemas. In Pro- ceedings of the 2013 Conference of the North Ameri- can Chapter of the Association for Computational Lin- guistics: Human Language Technologies, pages 74- 84.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Wikilinks: A Largescale Cross-Document Coreference Corpus Labeled via Links to Wikipedia", |
| "authors": [ |
| { |
| "first": "Sameer", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Amarnag", |
| "middle": [], |
| "last": "Subramanya", |
| "suffix": "" |
| }, |
| { |
| "first": "Fernando", |
| "middle": [], |
| "last": "Pereira", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sameer Singh, Amarnag Subramanya, Fernando Pereira, and Andrew McCallum. 2012. Wikilinks: A Large- scale Cross-Document Coreference Corpus Labeled via Links to Wikipedia. Technical Report UM-CS- 2012-015.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Theano: A Python Framework for Fast Computation of Mathematical Expressions", |
| "authors": [], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1605.02688v1" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Theano Development Team. 2016. Theano: A Python Framework for Fast Computation of Mathematical Ex- pressions. arXiv preprint arXiv:1605.02688v1.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Lecture 6.5 - RMSProp, COURSERA: Neural Networks for Machine Learning", |
| "authors": [ |
| { |
| "first": "Tijmen", |
| "middle": [], |
| "last": "Tieleman", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tijmen Tieleman and Geoffrey Hinton. 2012. Lecture 6.5 - RMSProp, COURSERA: Neural Networks for Machine Learning. Technical report.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Representing Text for Joint Embedding of Text and Knowledge Bases", |
| "authors": [ |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Pantel", |
| "suffix": "" |
| }, |
| { |
| "first": "Hoifung", |
| "middle": [], |
| "last": "Poon", |
| "suffix": "" |
| }, |
| { |
| "first": "Pallavi", |
| "middle": [], |
| "last": "Choudhury", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Gamon", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1499--1509", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kristina Toutanova, Danqi Chen, Patrick Pantel, Hoifung Poon, Pallavi Choudhury, and Michael Gamon. 2015. Representing Text for Joint Embedding of Text and Knowledge Bases. In Proceedings of the 2015 Con- ference on Empirical Methods in Natural Language Processing, pages 1499-1509.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Multilingual Relation Extraction using Compositional Universal Schema", |
| "authors": [ |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Verga", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Belanger", |
| "suffix": "" |
| }, |
| { |
| "first": "Emma", |
| "middle": [], |
| "last": "Strubell", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "886--896", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Patrick Verga, David Belanger, Emma Strubell, Ben- jamin Roth, and Andrew McCallum. 2016. Multi- lingual Relation Extraction using Compositional Uni- versal Schema. In Proceedings of the 2016 Confer- ence of the North American Chapter of the Associa- tion for Computational Linguistics: Human Language Technologies, pages 886-896.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "Knowledge Graph and Text Jointly Embedding", |
| "authors": [ |
| { |
| "first": "Zhen", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianwen", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianlin", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Zheng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1591--1601", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhen Wang, Jianwen Zhang, Jianlin Feng, and Zheng Chen. 2014. Knowledge Graph and Text Jointly Em- bedding. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing, pages 1591-1601.", |
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "Towards Universal Paraphrastic Sentence Embeddings", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Wieting", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Gimpel", |
| "suffix": "" |
| }, |
| { |
| "first": "Karen", |
| "middle": [], |
| "last": "Livescu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Wieting, Mohit Bansal, Kevin Gimpel, and Karen Livescu. 2016. Towards Universal Paraphrastic Sen- tence Embeddings. In Proceedings of the 2016 Inter- national Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF51": { |
| "ref_id": "b51", |
| "title": "Full-Time Supervision based Bidirectional RNN for Factoid Question Answering", |
| "authors": [ |
| { |
| "first": "Dong", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Wu-Jun", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1606.05854v2" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dong Xu and Wu-Jun Li. 2016. Full-Time Supervi- sion based Bidirectional RNN for Factoid Question Answering. arXiv preprint arXiv:1606.05854v2.", |
| "links": null |
| }, |
| "BIBREF52": { |
| "ref_id": "b52", |
| "title": "Joint Learning of the Embedding of Words and Entities for Named Entity Disambiguation", |
| "authors": [ |
| { |
| "first": "Ikuya", |
| "middle": [], |
| "last": "Yamada", |
| "suffix": "" |
| }, |
| { |
| "first": "Hiroyuki", |
| "middle": [], |
| "last": "Shindo", |
| "suffix": "" |
| }, |
| { |
| "first": "Hideaki", |
| "middle": [], |
| "last": "Takeda", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshiyasu", |
| "middle": [], |
| "last": "Takefuji", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 20th SIGNLL Conference on Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "250--259", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ikuya Yamada, Hiroyuki Shindo, Hideaki Takeda, and Yoshiyasu Takefuji. 2016. Joint Learning of the Em- bedding of Words and Entities for Named Entity Dis- ambiguation. In Proceedings of the 20th SIGNLL Con- ference on Computational Natural Language Learn- ing, pages 250-259.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "text": "Architecture of our neural network for EL and QA tasks.", |
| "num": null, |
| "type_str": "figure" |
| }, |
| "TABREF0": { |
| "num": null, |
| "text": "Name STS 2014 SICK News Forum OnWN Twitter Images Headlines NTEE (sentence) .74/.68 .56/.55 .72/.74 .75/.66 .82/.77 .69/.63 .71/.60 NTEE (paragraph) .74/.68 .52/.51 .66/.69 .74/.66 .77/.72 .68/.61 .69/.61 Fixed NTEE (sentence) .72/.69 .47/.46 .75/.78 .74/.67 .78/.74 .65/.61 .73/.61 Fixed NTEE (paragraph) .72/.69 .47/.47 .75/.78 .73/.67 .77/.74 .65/.61 .72/.61 Skip-gram .65/.67 .36/.39 .62/.69 .65/.66 .54/.56", |
| "content": "<table><tr><td/><td/><td>.62/.60</td><td>.66/.58</td></tr><tr><td>Skip-gram (plain)</td><td>.63/.65 .36/.39 .61/.69 .62/.62 .56/.57</td><td>.60/.58</td><td>.66/.58</td></tr><tr><td>CBOW</td><td>.58/.59 .35/.36 .57/.64 .70/.68 .54/.55</td><td>.57/.53</td><td>.61/.58</td></tr><tr><td>Skip-thought</td><td>.45/.44 .15/.14 .34/.39 .43/.42 .60/.55</td><td>.44/.43</td><td>.60/.57</td></tr><tr><td>Siamese CBOW</td><td>.59/.58 .41/.42 .61/.66 .73/.71 .65/.65</td><td>.64/.63</td><td>-</td></tr></table>", |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF1": { |
| "num": null, |
| "text": "Pearson's r and Spearman's ρ correlations of our models with the state-of-the-art models on semantic textual similarity task. Best scores, in terms of r, are marked in bold.", |
| "content": "<table/>", |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF4": { |
| "num": null, |
| "text": "Accuracies of the proposed method and the state-of-the-art methods.", |
| "content": "<table/>", |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF7": { |
| "num": null, |
| "text": "Examples of top five similar words with their cosine similarities in our learned word representations compared with those of the skip-gram model.", |
| "content": "<table><tr><td>texts, and Luan et al. (2016) used semantic role la-</td></tr><tr><td>beling for generating contexts to train word embed-</td></tr><tr><td>ding. Moreover, a few recent studies on learning en-</td></tr><tr><td>tity embedding based on word embedding methods</td></tr><tr><td>have been reported</td></tr></table>", |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF9": { |
| "num": null, |
| "text": "Examples of top five similar entities with their cosine similarities in our learned entity representations with those of the skip-gram model.", |
| "content": "<table/>", |
| "html": null, |
| "type_str": "table" |
| } |
| } |
| } |
| } |