| { |
| "paper_id": "U16-1006", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T03:10:52.103218Z" |
| }, |
| "title": "Unsupervised Pre-training With Sequence Reconstruction Loss for Deep Relation Extraction Models", |
| "authors": [ |
| { |
| "first": "Zhuang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "lizhuang144@gmail.com" |
| }, |
| { |
| "first": "Lizhen", |
| "middle": [], |
| "last": "Qu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The Australian National University", |
| "location": {} |
| }, |
| "email": "lizhen.qu@data61.csiro.au" |
| }, |
| { |
| "first": "Qiongkai", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The Australian National University", |
| "location": {} |
| }, |
| "email": "qiongkai.xu@data61.csiro.au" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Macquarie University", |
| "location": {} |
| }, |
| "email": "mark.johnson@mq.edu.au" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Data61", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Australia", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Unsupervised", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Relation extraction models based on deep learning have been attracting a lot of attention recently. Little research is carried out to reduce their need of labeled training data. In this work, we propose an unsupervised pre-training method based on the sequence-to-sequence model for deep relation extraction models. The pre-trained models need only half or even less training data to achieve equivalent performance as the same models without pre-training.", |
| "pdf_parse": { |
| "paper_id": "U16-1006", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Relation extraction models based on deep learning have been attracting a lot of attention recently. Little research is carried out to reduce their need of labeled training data. In this work, we propose an unsupervised pre-training method based on the sequence-to-sequence model for deep relation extraction models. The pre-trained models need only half or even less training data to achieve equivalent performance as the same models without pre-training.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Relation extraction (RE) is the task of detecting and categorizing semantic relations between named entities mentioned in a text corpus. This is important for a wide variety of practical applications. For example, tourism planning bodies are interested in mining social media such as tweets to identifying which restaurants tourists eat in and which hotels those same tourists stay in.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "RE has been intensively studied for several years (Chan and Roth, 2011; Chan and Roth, 2010) . Recently, RE models based on deep neural networks (DNN) have achieved better performance than conventional RE models that rely on handcrafted features (Xu et al., 2015) . However, these DNN models require a large amount of annotated training data, which is difficult and expensive to obtain. The data problem is not completely solved by relying on methods such as large external knowledge bases and distant supervision because i) models employing only large knowledge bases often still perform poorly on RE (Angeli et al., 2014) ; ii) the external knowledge bases are incomplete; and iii) many important applications lack the relevant domain specific knowledge bases. This paper asks the question: can we use unlabeled data to help training DNN RE models? Although unsupervised pre-training is known to be effective for training deep neural networks, it remains unclear how to apply it to the recently proposed DNN RE models. The main advantage of deep models (compared to the shallow counterparts) is that they automatically learn distributed representations of the relevant components of the model (e.g., words, entities, relations, etc.). If we can encode rich syntactic-semantic patterns of relation expressions into the automatically learned, low-dimensional representations, and require these representations to be similar if they play a similar role using only unlabeled data, it should be possible for a DNN RE system to achieve a high level of generalization from only small amount of labeled data.", |
| "cite_spans": [ |
| { |
| "start": 50, |
| "end": 71, |
| "text": "(Chan and Roth, 2011;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 72, |
| "end": 92, |
| "text": "Chan and Roth, 2010)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 246, |
| "end": 263, |
| "text": "(Xu et al., 2015)", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 602, |
| "end": 623, |
| "text": "(Angeli et al., 2014)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In a relational expression, the named entities and words around it provide useful context information. For example, in the sentence \"By 1982 the BL Cars Ltd division renamed itself Austin Rover Group shortly before the launch of the Maestro.\" renamed itself is used much less often than an expression such as was founded in to indicate the relation org:foundedIn. Thus it is likely that was founded in will be found in the training set, even if renamed itself does not appear in the training set. Despite this, the co-occurrence of 1982 and Austin Rover Group, as well as keywords such as by, form a context that is similar to that of Austin Rover Group was founded in 1982. If such shared contextual information can require the similarity of the representations of these expressions, a classifier can easily infer that renamed itself is likely to indicate org:foundedIn. Inspired by observations such as these, we seek methods that exploit context information composed of words and named entities to learn representations of expressions, such that semantically similar expressions tend to have similar representations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we propose a pre-training method that generalizes well-known sequence-to-sequence model (Dai and Le, 2015) for deep RE models. This approach formalizes unsupervised pre-training as minimizing reconstruction errors of input sequences. For a given DNN RE model, our approach first pre-trains it on a large, unlabeled, domain-general corpus, and then fine-tunes it on target corpora. Our experiments show that, especially when the size of the labeled training data is small, the deep relation extraction models pretrained with our unsupervised pre-training method using half or even a quarter of the labeled data are able to achieve similar performance as the models without pre-training. Our unsupervised approach does not need domain-specific corpora for pre-training; in fact, they work well with 13,000 sentences randomly sampled from Wikipedia.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Recent advance of relation extraction demonstrates the power of deep learning by showing that the deep models significantly outperform the conventional approaches (Jiang and Zhai, 2007; Chan and Roth, 2010; Chan and Roth, 2011) on the ACE relation extraction datasets. Except for the FCM model (Yu et al., 2015) , at the core of almost all deep RE models are variants of convolutional neural networks (CNN) (Zeng et al., 2014; Nguyen and Grishman, 2015; Miwa and Bansal, 2016) , recurrent neural networks (RNN) (Zhang et al., 2015; Socher et al., 2012; Ebrahimi and Dou, 2015; Lin et al., 2016) , or both of them (Liu et al., 2015; Cai et al., 2016) .", |
| "cite_spans": [ |
| { |
| "start": 163, |
| "end": 185, |
| "text": "(Jiang and Zhai, 2007;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 186, |
| "end": 206, |
| "text": "Chan and Roth, 2010;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 207, |
| "end": 227, |
| "text": "Chan and Roth, 2011)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 294, |
| "end": 311, |
| "text": "(Yu et al., 2015)", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 407, |
| "end": 426, |
| "text": "(Zeng et al., 2014;", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 427, |
| "end": 453, |
| "text": "Nguyen and Grishman, 2015;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 454, |
| "end": 476, |
| "text": "Miwa and Bansal, 2016)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 511, |
| "end": 531, |
| "text": "(Zhang et al., 2015;", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 532, |
| "end": 552, |
| "text": "Socher et al., 2012;", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 553, |
| "end": 576, |
| "text": "Ebrahimi and Dou, 2015;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 577, |
| "end": 594, |
| "text": "Lin et al., 2016)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 613, |
| "end": 631, |
| "text": "(Liu et al., 2015;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 632, |
| "end": 649, |
| "text": "Cai et al., 2016)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Several RE systems (Chen et al., 2006a; GuoDong et al., 2009; Li et al., 2010; LongHua and Qiaoming, 2008; Chen et al., 2006b; Kim and Lee, 2012) are built upon the semi-supervised learning algorithm label propagation to exploit the use of unlabeled data. This family of algorithms start with building a similarity graph between each pair of relation mentions, and propagate relation labels from labeled ones to unlabeled ones. However, deep RE models require substantial change in order to use these algorithms, while our methods just need to replace the training criterion during pre-training, which is easy-to-implement by using a standard deep learning toolkit. It is also too expensive to involve all unlabeled data in both training and prediction processes for each target dataset. In contrast, our pre-training algorithms are performed only once on a general corpus and the resulted models are fine-tuned only on target corpora.", |
| "cite_spans": [ |
| { |
| "start": 19, |
| "end": 39, |
| "text": "(Chen et al., 2006a;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 40, |
| "end": 61, |
| "text": "GuoDong et al., 2009;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 62, |
| "end": 78, |
| "text": "Li et al., 2010;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 79, |
| "end": 106, |
| "text": "LongHua and Qiaoming, 2008;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 107, |
| "end": 126, |
| "text": "Chen et al., 2006b;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 127, |
| "end": 145, |
| "text": "Kim and Lee, 2012)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "There is also ample of work exploring the idea of distant supervision for knowledge base completion (Riedel et al., 2013; Yang et al., 2014; in order to avoid the use of manually labeled data. Although some of these models include a relation extraction component (Surdeanu et al., 2012; Angeli et al., 2014; Toutanova et al., 2015) , the outputs of their systems are whether a relation holds between entities rather than entity mentions. In contrast, we aim to classify relation mentions no matter if a target relation exists in a knowledge base or not.", |
| "cite_spans": [ |
| { |
| "start": 100, |
| "end": 121, |
| "text": "(Riedel et al., 2013;", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 122, |
| "end": 140, |
| "text": "Yang et al., 2014;", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 263, |
| "end": 286, |
| "text": "(Surdeanu et al., 2012;", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 287, |
| "end": 307, |
| "text": "Angeli et al., 2014;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 308, |
| "end": 331, |
| "text": "Toutanova et al., 2015)", |
| "ref_id": "BIBREF40" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "There have also been other efforts towards minimizing the use of labeled data. In (Sun, 2009) , they proposed a bootstrapping approach to extract textual patterns for training a SVM-based relation extraction system. In (Chan and Roth, 2011), they show that supervised models equipped with syntactico-semantic features are capable of classifying relation mentions with a few labeled data. However, both work are customized for supervised models with handcrafted features and relations between nominals. In other lines of research, active learning (Fu and Grishman, 2013; Sun and Grishman, 2012) and domain adaptation (Nguyen and Grishman, 2014) pursued to select high quality training examples for training relation extraction models. Jiang (2009) leverages the knowledge of known relations to predict new relations in a weakly supervised setting. These approaches have different problem settings than ours, which focus on the use of unlabeled data.", |
| "cite_spans": [ |
| { |
| "start": 82, |
| "end": 93, |
| "text": "(Sun, 2009)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 546, |
| "end": 569, |
| "text": "(Fu and Grishman, 2013;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 570, |
| "end": 593, |
| "text": "Sun and Grishman, 2012)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 734, |
| "end": 746, |
| "text": "Jiang (2009)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Since 2006, various pre-training techniques are proposed to make the training of deep neural networks practical (Hinton and Salakhutdinov, 2006; Dahl et al., 2010; Bengio, 2009) . They are not universally applicable for all problems and most of them focus on computer vision problems. To the best of our knowledge, we are the first to explore the use of pre-training for deep RE models.", |
| "cite_spans": [ |
| { |
| "start": 112, |
| "end": 144, |
| "text": "(Hinton and Salakhutdinov, 2006;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 145, |
| "end": 163, |
| "text": "Dahl et al., 2010;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 164, |
| "end": 177, |
| "text": "Bengio, 2009)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Suppose we are given a relation mention, which is a pair of named entity mentions (m h , m t ) together with its relation expression in a sentence S. Each mention m is disambiguated into an entity e. Let x \u2208 X denote a relation mention, where X is the space of all relation mentions, RE models assign a binary relation y \u2208 Y to x, where Y is a finite set of all possible relations. As a result, an RE model is a function g : X \u2192 Y.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Relation Extraction Models", |
| "sec_num": "3" |
| }, |
| { |
| "text": "(x 1 , y 1 ), ..., (x n , y n ) \u2208 X \u00d7 Y, we can directly learn an RE model by minimiz- ing a supervised loss function L s : X \u00d7 Y \u2192 R.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Given a training set", |
| "sec_num": null |
| }, |
| { |
| "text": "In absence of sufficient supervised training data, we resort to a two-stage approach. In the first stage, we pre-train RE models on a dataset annotated with named entity mentions and their corresponding entities by minimizing an unsupervised loss L u : X \u2192 R. In the second stage, we finetune the pre-trained models on the labeled dataset by applying the supervised loss L s . In our experiments, L s is the cross-entropy loss, as a result of applying multi-class logistic regression (LR) in the supervised setting.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Given a training set", |
| "sec_num": null |
| }, |
| { |
| "text": "The deep RE models proposed recently are variants of Long Short Term Memory (LSTM) (Graves and Schmidhuber, 2005) and Convolutional Neural Networks (CNN) (Krizhevsky et al., 2012) . As representative examples we consider three recent RE models: i) bidirectional LSTM that takes words around entity mentions as input (Zhang et al., 2015) , coined BiLSTM; ii) LSTM taking shortest paths in dependency trees as input, coined Dep-TreeLSTM; iii) CNN taking words sequences and position embeddings as input (dos Santos et al., 2015), coined PCNN.", |
| "cite_spans": [ |
| { |
| "start": 83, |
| "end": 113, |
| "text": "(Graves and Schmidhuber, 2005)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 154, |
| "end": 179, |
| "text": "(Krizhevsky et al., 2012)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 316, |
| "end": 336, |
| "text": "(Zhang et al., 2015)", |
| "ref_id": "BIBREF49" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Given a training set", |
| "sec_num": null |
| }, |
| { |
| "text": "All three RE models consist of four components. As illustrated in Figure 1 , as input they take either word sequences between two entity mentions or the shortest dependency path between two entity mentions. A look up table maps each input word into its word embedding. Herein we denote the word embedding of a word i by e i \u2208 R M , where M is the dimension of word embeddings. All word embeddings are initialized with the ones pre-trained on a large domain-general corpus (Qu et al., 2015) . As suggested in (Qu et al., 2015) , we do not update these word embeddings during training to avoid overfitting. In the next step, a feature learning component projects the embeddings into a hidden representation h. If it is in a supervised setting, both h and handcrafted features are taken as the input of a multi-class LR classifier for categorizing target relations. In case of unsupervised pre-training, h is fed into a classifier for a designated unsupervised predictive task.", |
| "cite_spans": [ |
| { |
| "start": 472, |
| "end": 489, |
| "text": "(Qu et al., 2015)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 508, |
| "end": 525, |
| "text": "(Qu et al., 2015)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 66, |
| "end": 74, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Given a training set", |
| "sec_num": null |
| }, |
| { |
| "text": "The RE models based on BiLSTM and TreeL-STM are extensions of LSTM. LSTM is a recurrent neural network capable of capturing long dependencies (Graves and Schmidhuber, 2005) . At the t-th time step, the LSTM layer takes the form:", |
| "cite_spans": [ |
| { |
| "start": 142, |
| "end": 172, |
| "text": "(Graves and Schmidhuber, 2005)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Given a training set", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "u t , c t = LSTM(x t , u t\u22121 , c t\u22121 )", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Given a training set", |
| "sec_num": null |
| }, |
| { |
| "text": "where x t is the input to LSTM at time step t, and u t and c t are the hidden states and memory states of LSTM at time step t, respectively. BiLSTM reads an input word sequence in both directions with two separate LSTM layers. As illustrated in Figure 2c , one LSTM reads the word sequence between two entity mentions in forwards direction, while the other with shared parameters reads the same sequence in the reverse direction. As a result, they generate two hidden representations \u2212 \u2192 h and \u2190 \u2212 h , which are further concatenated to form the input vector h of the classifier. DepTreeLSTM takes as input the shortest path between two entity mention in a syntactic dependency tree. The shortest path consists of two subpaths, which starts from an entity mention and ends at their lowest common ancestor. Since both subpaths are word sequences, as shown in Figure 2a , the feature learning component is composed of two LSTM layers with shared parameters to read the two subpaths respectively. The resulted two representations are concatenated as the input of the classifier. This model can be viewed either as the model proposed in (Ebrahimi and Dou, 2015) by replacing the recursive neural networks with LSTM, or as simplifying the model proposed in (Xu et al., 2015) by removing the max-pooling layer. The max-pooling layer leads to degraded performance in our preliminary experiments. PCNN implements the model in (dos Santos et al., 2015), which takes as input the word sequence between two entity mentions. It starts with mapping each input word to its word embedding. Each word embedding is further concatenated with its position embedding, which encodes relative distance of the word w.r.t. each entity mention. To cope with input word sequences of varying length, the embedding sequences smaller than the pre-specified maximal length are padded with the embedding of the padding token. 
Then a convolutional layer and a max pooling layer are applied in sequel to generate the input h for the classifier.", |
| "cite_spans": [ |
| { |
| "start": 1133, |
| "end": 1157, |
| "text": "(Ebrahimi and Dou, 2015)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 1252, |
| "end": 1269, |
| "text": "(Xu et al., 2015)", |
| "ref_id": "BIBREF45" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 245, |
| "end": 254, |
| "text": "Figure 2c", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 857, |
| "end": 867, |
| "text": "Figure 2a", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Given a training set", |
| "sec_num": null |
| }, |
| { |
| "text": "For all three models, we augment them with the handcrafted features used in the top conventional RE systems that do not rely on deep learning techniques. They lead to improved results according to our preliminary experiments. In particular, we include lexical, collocation, and dependency features proposed in (Chan and Roth, 2010). The other features used in (Chan and Roth, 2010) are dropped because the relevant information is not available in our target datasets. In addition, we implemented the POS features and the base phrase chunk features introduced in (Chan and Roth, 2011). struction loss.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Given a training set", |
| "sec_num": null |
| }, |
| { |
| "text": "Entities often provide vital information for relation extraction (Chan and Roth, 2010). Qu et al. (2015) show that the extraction of entity mentions benefits significantly from distributional similarity, thus we learn entity embeddings by using the Skip-gram model (Mikolov et al., 2013) . An entity mention such as Austin Rover Group often spans more than one word, while the Skip-gram model works on sequences of tokens. Therefore we retokenize text by mapping each entity mention into a single token, and replace them with the IDs of the referred entities.", |
| "cite_spans": [ |
| { |
| "start": 88, |
| "end": 104, |
| "text": "Qu et al. (2015)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 265, |
| "end": 287, |
| "text": "(Mikolov et al., 2013)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning Entity Embeddings", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The domain specific RE corpora are often small. The retokenization of documents further leads to a substantial number of infrequent entity tokens. We can only obtain embeddings of poor quality for these tokens if we train them from scratch (Collobert et al., 2011) . To circumvent the problems, we employ a stepwise strategy. First, we initialize all word embeddings with the pre-trained ones on a large corpus (Qu et al., 2015) . Second, we initialize each entity embedding by averaging the embeddings of all the words ever occurred in its mentions, following (Socher et al., 2013) . Third, we update only entity embeddings by using the Skip-Gram model. This allows us to update them with an aggressive learning rate since we expect a large change of these embeddings. And we keep the pre-trained word embeddings intact to preserve the knowledge of distributional similarity learned from a large general corpus, as suggested in (Qu et al., 2015) . After training with the Skip-gram model, we also do not update these entity embeddings while training with the deep RE models because updating these embeddings was not shown to be useful in our preliminary experiments.", |
| "cite_spans": [ |
| { |
| "start": 240, |
| "end": 264, |
| "text": "(Collobert et al., 2011)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 411, |
| "end": 428, |
| "text": "(Qu et al., 2015)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 561, |
| "end": 582, |
| "text": "(Socher et al., 2013)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 929, |
| "end": 946, |
| "text": "(Qu et al., 2015)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning Entity Embeddings", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Given pre-trained word and entity embeddings, the randomly initialized deep RE models still suffer from poor performance if the target training datasets are too small compared to their vast number of model parameters. Inspired by Autoencoders (Vincent et al., 2010) , our key idea is to obtain high quality representations by reconstructing the corresponding inputs. During the process of reconstruction, if two expressions share similar context, we expect that they end up with having similar representations.", |
| "cite_spans": [ |
| { |
| "start": 243, |
| "end": 265, |
| "text": "(Vincent et al., 2010)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Reconstruction Loss", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We draw inspiration from the semi-supervised sequence-to-sequence (seq2seq) model (Dai and Le, 2015) for pre-training deep RE models. Its underlying seq2seq (Sutskever et al., 2014) model consists of an LSTM encoder and an LSTM decoder. The encoder reads a sequence of words and map them into a hidden representation. Then the decoder takes the representation as input and predicts the most likely sequence of words. The training objective is to minimize the discrepancy between the predicted sequence and the input sequence.", |
| "cite_spans": [ |
| { |
| "start": 157, |
| "end": 181, |
| "text": "(Sutskever et al., 2014)", |
| "ref_id": "BIBREF39" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Reconstruction Loss", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "All of the three deep RE models presented in Sec 3 take as input word sequences, and generate a hidden representation h for the classifier. Our key idea of generalizing the semi-supervised seq2seq model is to reuse the feature learning component h(x) as the encoder and reconstruct the input sequence in each direction by using an LSTM decoder. The change of encoder is particularly interesting for PCNN , which adopts a different type of model than the decoder.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Reconstruction Loss", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Given an entity mention pair, the input of both PCNN and BiLSTM is the word sequence between both mentions and the mentions themselves. PCNN applies CNN to read the input sequence in both forwards and backwards directions, and results in two hidden representations \u2212 \u2192 h and \u2190 \u2212 h respectively. Its LSTM decoder reads each representation and reconstructs the input sequence in the corresponding direction, respectively. In the same manner, BiLSTM applies the two LSTM layers to read and reconstruct input sequences in both directions. Although DepTreeLSTM takes input from dependency trees, it follows the same way as the other two models by reconstructing two word sequences in their respective reading direction. Herein, each sequence is read from the entity mention to their lowest common ancestor.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Reconstruction Loss", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The LSTM decoder consists of an LSTM in the form of Equation (1) and a softmax classifier. At time step t, the LSTM layer reads the previous hidden state u t\u22121 and the predicted word x t\u22121 at time step t \u2212 1, followed by generating the current hidden state u t . The current hidden state u t is fed into the softmax classifier to predict the word x t , where the softmax classifier is defined as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Reconstruction Loss", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "P (x = j|u t ) = exp(e T j u t ) |V| k=1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Reconstruction Loss", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "exp(e T k u t ) where V denotes the vocabulary. When t = 1, the LSTM initializes the initial state as u and c 0 = 0.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Reconstruction Loss", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "For the sake of computational efficiency, we minimize the reconstruction loss by approximating the cross-entropy loss of the softmax function by using the negative sampling technique in (Mikolov et al., 2013) . As a result, at the t-th time step during decoding, we minimize", |
| "cite_spans": [ |
| { |
| "start": 186, |
| "end": 208, |
| "text": "(Mikolov et al., 2013)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Reconstruction Loss", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u2212 log \u03c3(e T xt u t ) \u2212 k j=1 E x j \u223cPn(x) log \u03c3(\u2212e T x j u t )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Reconstruction Loss", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "where x t is the corresponding word observed in the input sequence, \u03c3 denotes the sigmoid function, and P n (x) is the noise distribution for drawing k negative samples. In our experiments, we employ uniform distribution as the noise distribution. Then the loss function L u is the sum of the above loss over all words in input sequences.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Reconstruction Loss", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "5 Experimental Setup", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Reconstruction Loss", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We use the Stanford Relation Extraction corpus (StanfordRE) (Angeli et al., 2014) as the target corpus for evaluation. Each entity mention is associated with a canonical name. We map each canonical name to an entity ID in two ways. If the canonical name can be found in Freebase, we replace the mention with its Freebase machine ID. Otherwise we replace the mention with the ID based on its canonical name. In addition, we filter out the relation mentions with a annotator agreement lower than 80% as well as the ones labeled as no relation, because they are the source of label noise based on our manual inspection. This is beyond the scope of this work. As a result, we obtain 9150 relation mentions and 40 relations in total. Among all relation mentions in the StanfordRE corpus, we hold out 20% relation mentions for testing, 10% for development, and the remaining for training. In order to test the impact of the volume of training data for fine-tuning, we split the training portion of the corpus into 10 partitions based on a log scale, and created 10 successively larger training sets S 1 , S 2 , ..., S 10 by merging these partitions from smallest to largest. As a result, S i+1 is twice the size of the S i and S 10 is the full training set.", |
| "cite_spans": [ |
| { |
| "start": 60, |
| "end": 81, |
| "text": "(Angeli et al., 2014)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets and Evaluation Protocol", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "For pre-training, we use the FIGER corpus, which is a sample of Wikipedia annotated with millions of entity mentions (Desmet and Hoste, 2014) . Because each entity mention is also linked to a canonical name, we convert each mention to an entity ID in the same way as for StanfordRE.", |
| "cite_spans": [ |
| { |
| "start": 117, |
| "end": 141, |
| "text": "(Desmet and Hoste, 2014)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets and Evaluation Protocol", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "To investigate the influence of the size of pre-training corpora, we create three corpora for pre-training:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets and Evaluation Protocol", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "(i) StanfordWiki: to verify if the relation mentions from the FIGER corpus, whose entity mention pairs also occur in target corpora, are most relevant during pre-training, we collect all sentences, in which there is at least one entity pair occurring also in a sentence from the StanfordRE corpus. Then we merge them with the StanfordRE to build a corpus, which contains 133,793 relation mentions in total.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets and Evaluation Protocol", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "(ii) WikiRandom: we randomly sample five non-overlapping subsets from the FIGER corpus, each of which contains a similar number of relation mentions as StanfordWiki.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets and Evaluation Protocol", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "(iii) WikiWhole: we collect all sentences in the whole FIGER corpus, which contain at least two entity mentions. As a result, we get 1,004,831 sentences and 3,886,998 relation mentions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets and Evaluation Protocol", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "In this paper, we mainly present the pre-training results of all models on WikiRandom, because i) they are similar to those on StanfordWiki and WikiWhole; ii) random sentence samples are easy to acquire. For the experiments on WikiRandom, we perform one run on each of the five random samples, report averaged micro-F1 scores over all five runs as well as their standard deviations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets and Evaluation Protocol", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We compared pre-trained deep RE models with their randomly initialized counterparts, which differ in their input features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Handcrafted: an LR classifier with the same handcrafted features as the deep RE models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Avg embed: deep RE models with handcrafted features, pre-trained word embeddings, and entity embeddings generated by averaging the embeddings of the words occurred in mentions. The model parameters of the feature learning component and the LR classifier are randomly initialized.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Random stepwise: deep RE models with handcrafted features, pre-trained word embeddings, and entity embeddings trained by our stepwise training strategy. Their model parameters are randomly initialized in the same way as avg embed.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "We compare both LSTM based RE models in two different settings of pre-training: i) the LSTM in the decoder does not share parameters with the LSTM in the feature learning component; ii) both LSTM layers share parameters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Given small training datasets, the performance of neural network models often depends on randomly initialized parameters, thus we perform five runs with different random initialization and report the averaged micro-F1 score.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "In our experiments, we reuse the 200-dimensional pre-trained word embeddings based on the Skipgram model from our prior work (Qu et al., 2015) . The corresponding negative samples is 10 and the size of local context window is 5. During stepwise training, all entity embeddings are fine-tuned with a learning rate 0.001 for 50 epochs within a local context window of size 5, the number of negative samples is set to 10. For both LSTM variants, we implemented LSTM in the same way as in , the dimension of hidden units is fixed to 200. For PCNN , the dimension of each position embedding is 70, as in (dos Santos et al., 2015) , the size of the context window is 3, and the output of the convolutional layer consists of 200 hidden units. During pre-training, the number of negative samples is set to 10. In both pre-training and fine-tuning, we adopt Ada-Grad (Duchi et al., 2011) and L2 regularizer for optimization. We tune all hyperparameters on the development set. As a result, the initial learning rates of AdaGrad is 0.1 for both LSTM variants and 0.05 for PCNN during pre-training, and it is fixed to 0.05 during supervised training. For all models, the hyperparameter of L2 regularization is fixed to 10E \u22126 .", |
| "cite_spans": [ |
| { |
| "start": 125, |
| "end": 142, |
| "text": "(Qu et al., 2015)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 599, |
| "end": 624, |
| "text": "(dos Santos et al., 2015)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 858, |
| "end": 878, |
| "text": "(Duchi et al., 2011)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation Details", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "As illustrated in Figure 3 , all deep RE models pre-trained with the best method outperform the baselines by a wide margin unless the full training set is used. And the performance of these pretrained models has small variance across all five random training samples. Among all these models, pre-trained DepTreeLSTM is the best performing model on StanfordRE on average. The pre-trained BiLSTM achieves the largest improvement w.r.t. its randomly initialized counterpart with the entity embeddings computed by averaging word embeddings. It needs merely 800 sentences to achieve similar performance as the randomly initialized one trained on 3200 sentences.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 18, |
| "end": 26, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Both LSTM based models show that it is better not to share the parameters of LSTM between encoders and decoders. Otherwise they achieve only similar performance as the best baselines. We also observe that the gap between both pre-trained LSTM variants and their competitors narrows as the size of the in-domain training data grows more than 1000 sentences. For them, pre-training is only useful when training data is small.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Discussions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In contrast, the pre-trained PCNN follows a different trend by achieving the highest improvement over the randomly initialized one when there are more than 3200 target relation mentions for training. Without pre-training, PCNN performs even worse than the baseline with handcrafted features unless the full training set is used. We conjecture that the opposite trend is caused by the high variance introduced by max-pooling and the learning of position embeddings. This model is indeed more difficult to train than the other two models, because it obtains the highest variance among all three models when parameters are randomly initialized. Despite this, the pre-training provides significantly better initialization of model parameters and leads to small variance across all pre-training samples.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Discussions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "The stepwise training strategy is helpful for improving entity embeddings regardless the type of models, as shown in Figure 3 . However, it is also not the main power booster during pre-training as the largest improvement is achieved always by unsupervised training losses. In case of BiLSTM , the improvement over the averaged word embeddings becomes clear when more than 800 training instances are used.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 117, |
| "end": 125, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In order to gain a deeper understanding of the effect of pre-training, we compare the representations generated by the pre-trained models with the ones without pre-training. We compare them also at the begin and the end of fine-tuning respectively. In particular, we apply T-SNE (Maaten and Hinton, 2008) to visualize the expression representations generated by the feature learning component h(x) of PCNN . As the Figure 4 illustrates, compare to the randomly initialized PCNN , at the begin of fine-tuning, we are more likely to find the representations closed to each other with the one pre-trained with the sequence reconstruction loss, if the corresponding expressions express the same relation. It is an evidence of our high-level intuition: our unsupervised pre-training losses are able to build similar representations for similar relation expressions. After fine-tuning, the expressions of the same relation form more compact clusters by the pre-trained model than by the randomly initialized one. This explains the performance improvement achieved by the pre-trained PCNN . The size and sampling strategies of unlabeled data have little influence on pre-training. Figure 5 shows that all models achieve similar results on random samples as on WikiRandom. Using the whole FIGER corpus leads to a marginal improvement up to 3% F1 score. This suggests that a few thousand randomly selected sentences are sufficient for achieving the pre-training effect with this sequence reconstruction loss. ", |
| "cite_spans": [ |
| { |
| "start": 279, |
| "end": 304, |
| "text": "(Maaten and Hinton, 2008)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 415, |
| "end": 423, |
| "text": "Figure 4", |
| "ref_id": null |
| }, |
| { |
| "start": 1174, |
| "end": 1184, |
| "text": "Figure 5", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In the absence of a large amount of manually labeled training data, we propose the sequence reconstruction loss as a generalization of semi-supervised seq2seq model for pre-training deep RE models. The pre-trained models achieve performance comparable to that of their counterparts without pre-training while employing merely half or even a quarter of the training data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Unsupervised Pre-trainingInspired by the semi-supervised sequence-tosequence model (Dai and Le, 2015), our unsupervised pre-training methods tackle the learning of deep RE models in two steps. First, we learn entity embeddings by using a stepwise training strategy. Second, we train the feature learning components h(x) of deep RE models by using sequence recon-", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This research was supported by NICTA, funded by the Australian Government through the Department of Communications and the Australian Research Council through the ICT Centre of Excellence Program.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Combining distant and partial supervision for relation extraction", |
| "authors": [ |
| { |
| "first": "Gabor", |
| "middle": [], |
| "last": "Angeli", |
| "suffix": "" |
| }, |
| { |
| "first": "Julie", |
| "middle": [], |
| "last": "Tibshirani", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1556--1567", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gabor Angeli, Julie Tibshirani, Jean Wu, and Christo- pher D. Manning. 2014. Combining distant and partial supervision for relation extraction. In Pro- ceedings of the 2014 Conference on Empirical Meth- ods in Natural Language Processing, October 25- 29, Doha, Qatar, pages 1556-1567.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Learning deep architectures for ai. Foundations and trends R in Machine Learning", |
| "authors": [ |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "", |
| "volume": "2", |
| "issue": "", |
| "pages": "1--127", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoshua Bengio. 2009. Learning deep architectures for ai. Foundations and trends R in Machine Learning, 2(1):1-127.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Translating embeddings for modeling multirelational data", |
| "authors": [ |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicolas", |
| "middle": [], |
| "last": "Usunier", |
| "suffix": "" |
| }, |
| { |
| "first": "Alberto", |
| "middle": [], |
| "last": "Garcia-Duran", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "Oksana", |
| "middle": [], |
| "last": "Yakhnenko", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "2787--2795", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Antoine Bordes, Nicolas Usunier, Alberto Garcia- Duran, Jason Weston, and Oksana Yakhnenko. 2013. Translating embeddings for modeling multi- relational data. In Advances in Neural Information Processing Systems, pages 2787-2795.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Bidirectional recurrent convolutional neural network for relation classification", |
| "authors": [ |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Cai", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Houfeng", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics, ACL 2016", |
| "volume": "1", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rui Cai, Xiaodong Zhang, and Houfeng Wang. 2016. Bidirectional recurrent convolutional neural network for relation classification. In Proceedings of the 54th Annual Meeting of the Association for Compu- tational Linguistics, ACL 2016, August 7-12, 2016, Berlin, Germany, Volume 1: Long Papers.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Exploiting background knowledge for relation extraction", |
| "authors": [ |
| { |
| "first": "Yee", |
| "middle": [], |
| "last": "Seng", |
| "suffix": "" |
| }, |
| { |
| "first": "Chan", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "23rd International Conference on Computational Linguistics, Proceedings of the Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "152--160", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yee Seng Chan and Dan Roth. 2010. Exploiting background knowledge for relation extraction. In 23rd International Conference on Computational Linguistics, Proceedings of the Conference, 23-27 August 2010, Beijing, China, pages 152-160.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Exploiting syntactico-semantic structures for relation extraction", |
| "authors": [ |
| { |
| "first": "Yee", |
| "middle": [], |
| "last": "Seng", |
| "suffix": "" |
| }, |
| { |
| "first": "Chan", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "551--560", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yee Seng Chan and Dan Roth. 2011. Exploiting syntactico-semantic structures for relation extrac- tion. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics, pages 551-560. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Relation extraction using label propagation based semi-supervised learning", |
| "authors": [ |
| { |
| "first": "Jinxiu", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Donghong", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhengyu", |
| "middle": [], |
| "last": "Chew Lim Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Niu", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 21st International Conference on Computational Linguistics and the 44th annual meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "129--136", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jinxiu Chen, Donghong Ji, Chew Lim Tan, and Zhengyu Niu. 2006a. Relation extraction using label propagation based semi-supervised learning. In Proceedings of the 21st International Conference on Computational Linguistics and the 44th annual meeting of the Association for Computational Lin- guistics, pages 129-136. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Semi-supervised relation extraction with label propagation", |
| "authors": [ |
| { |
| "first": "Jinxiu", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Donghong", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhengyu", |
| "middle": [], |
| "last": "Chew Lim Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Niu", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the Human Language Technology Conference of the NAACL, Companion Volume: Short Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "25--28", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jinxiu Chen, Donghong Ji, Chew Lim Tan, and Zhengyu Niu. 2006b. Semi-supervised relation ex- traction with label propagation. In Proceedings of the Human Language Technology Conference of the NAACL, Companion Volume: Short Papers, pages 25-28. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Natural language processing (almost) from scratch", |
| "authors": [ |
| { |
| "first": "Ronan", |
| "middle": [], |
| "last": "Collobert", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "L\u00e9on", |
| "middle": [], |
| "last": "Bottou", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Karlen", |
| "suffix": "" |
| }, |
| { |
| "first": "Koray", |
| "middle": [], |
| "last": "Kavukcuoglu", |
| "suffix": "" |
| }, |
| { |
| "first": "Pavel", |
| "middle": [], |
| "last": "Kuksa", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "12", |
| "issue": "", |
| "pages": "2493--2537", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ronan Collobert, Jason Weston, L\u00e9on Bottou, Michael Karlen, Koray Kavukcuoglu, and Pavel Kuksa. 2011. Natural language processing (almost) from scratch. Journal of Machine Learning Research, 12(Aug):2493-2537.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Phone recognition with the mean-covariance restricted boltzmann machine", |
| "authors": [ |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Dahl", |
| "suffix": "" |
| }, |
| { |
| "first": "Abdel-Rahman", |
| "middle": [], |
| "last": "Mohamed", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [ |
| "E" |
| ], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "469--477", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George Dahl, Abdel-rahman Mohamed, Geoffrey E Hinton, et al. 2010. Phone recognition with the mean-covariance restricted boltzmann machine. In Advances in neural information processing systems, pages 469-477.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Semisupervised sequence learning", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Andrew", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc V", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "3061--3069", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrew M Dai and Quoc V Le. 2015. Semi- supervised sequence learning. In Advances in Neu- ral Information Processing Systems, pages 3061- 3069.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Finegrained dutch named entity recognition. Language Resources and Evaluation", |
| "authors": [ |
| { |
| "first": "Bart", |
| "middle": [], |
| "last": "Desmet", |
| "suffix": "" |
| }, |
| { |
| "first": "V\u00e9ronique", |
| "middle": [], |
| "last": "Hoste", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "48", |
| "issue": "", |
| "pages": "307--343", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bart Desmet and V\u00e9ronique Hoste. 2014. Fine- grained dutch named entity recognition. Language Resources and Evaluation, 48(2):307-343.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Classifying relations by ranking with convolutional neural networks", |
| "authors": [ |
| { |
| "first": "C\u00edcero", |
| "middle": [], |
| "last": "Nogueira Dos Santos", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Xiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Bowen", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing of the Asian Federation of Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "626--634", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "C\u00edcero Nogueira dos Santos, Bing Xiang, and Bowen Zhou. 2015. Classifying relations by ranking with convolutional neural networks. In Proceed- ings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th Interna- tional Joint Conference on Natural Language Pro- cessing of the Asian Federation of Natural Language Processing, ACL 2015, July 26-31, 2015, Beijing, China, pages 626-634.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Adaptive subgradient methods for online learning and stochastic optimization", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Duchi", |
| "suffix": "" |
| }, |
| { |
| "first": "Elad", |
| "middle": [], |
| "last": "Hazan", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoram", |
| "middle": [], |
| "last": "Singer", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "12", |
| "issue": "", |
| "pages": "2121--2159", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Duchi, Elad Hazan, and Yoram Singer. 2011. Adaptive subgradient methods for online learning and stochastic optimization. Journal of Machine Learning Research, 12(Jul):2121-2159.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Chain based RNN for relation classification", |
| "authors": [ |
| { |
| "first": "Javid", |
| "middle": [], |
| "last": "Ebrahimi", |
| "suffix": "" |
| }, |
| { |
| "first": "Dejing", |
| "middle": [], |
| "last": "Dou", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "The 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "1244--1249", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Javid Ebrahimi and Dejing Dou. 2015. Chain based RNN for relation classification. In NAACL HLT 2015, The 2015 Conference of the North American Chapter of the Association for Computational Lin- guistics: Human Language Technologies, Denver, Colorado, USA, May 31 -June 5, 2015, pages 1244- 1249.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "An efficient active learning framework for new relation types", |
| "authors": [ |
| { |
| "first": "Lisheng", |
| "middle": [], |
| "last": "Fu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ralph", |
| "middle": [], |
| "last": "Grishman", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "IJCNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "692--698", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lisheng Fu and Ralph Grishman. 2013. An efficient active learning framework for new relation types. In IJCNLP, pages 692-698.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Framewise phoneme classification with bidirectional lstm and other neural network architectures", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Graves", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Neural Networks", |
| "volume": "18", |
| "issue": "5", |
| "pages": "602--610", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Graves and J\u00fcrgen Schmidhuber. 2005. Frame- wise phoneme classification with bidirectional lstm and other neural network architectures. Neural Net- works, 18(5):602-610.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Label propagation via bootstrapped support vectors for semantic relation extraction between named entities", |
| "authors": [ |
| { |
| "first": "Zhou", |
| "middle": [], |
| "last": "Guodong", |
| "suffix": "" |
| }, |
| { |
| "first": "Qian", |
| "middle": [], |
| "last": "Longhua", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhu", |
| "middle": [], |
| "last": "Qiaoming", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Computer Speech & Language", |
| "volume": "23", |
| "issue": "4", |
| "pages": "464--478", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhou GuoDong, Qian LongHua, and Zhu QiaoMing. 2009. Label propagation via bootstrapped sup- port vectors for semantic relation extraction between named entities. Computer Speech & Language, 23(4):464-478.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Reducing the dimensionality of data with neural networks", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Geoffrey", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan R", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Science", |
| "volume": "313", |
| "issue": "5786", |
| "pages": "504--507", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Geoffrey E Hinton and Ruslan R Salakhutdinov. 2006. Reducing the dimensionality of data with neural net- works. Science, 313(5786):504-507.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "A systematic exploration of the feature space for relation extraction", |
| "authors": [ |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Chengxiang", |
| "middle": [], |
| "last": "Zhai", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "HLT-NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "113--120", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jing Jiang and ChengXiang Zhai. 2007. A systematic exploration of the feature space for relation extrac- tion. In HLT-NAACL, pages 113-120.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Multi-task transfer learning for weakly-supervised relation extraction", |
| "authors": [ |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP", |
| "volume": "2", |
| "issue": "", |
| "pages": "1012--1020", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jing Jiang. 2009. Multi-task transfer learning for weakly-supervised relation extraction. In Proceed- ings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP: Volume 2-Volume 2, pages 1012-1020. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "A graph-based cross-lingual projection approach for weakly supervised relation extraction", |
| "authors": [ |
| { |
| "first": "Seokhwan", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Gary Geunbae", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics: Short Papers", |
| "volume": "2", |
| "issue": "", |
| "pages": "48--53", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Seokhwan Kim and Gary Geunbae Lee. 2012. A graph-based cross-lingual projection approach for weakly supervised relation extraction. In Proceed- ings of the 50th Annual Meeting of the Associa- tion for Computational Linguistics: Short Papers- Volume 2, pages 48-53. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Imagenet classification with deep convolutional neural networks", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Krizhevsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [ |
| "E" |
| ], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "1097--1105", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hin- ton. 2012. Imagenet classification with deep con- volutional neural networks. In Advances in neural information processing systems, pages 1097-1105.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Semantic relation extraction based on semisupervised learning", |
| "authors": [ |
| { |
| "first": "Haibo", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Yutaka", |
| "middle": [], |
| "last": "Matsuo", |
| "suffix": "" |
| }, |
| { |
| "first": "Mitsuru", |
| "middle": [], |
| "last": "Ishizuka", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Asia Information Retrieval Symposium", |
| "volume": "", |
| "issue": "", |
| "pages": "270--279", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Haibo Li, Yutaka Matsuo, and Mitsuru Ishizuka. 2010. Semantic relation extraction based on semi- supervised learning. In Asia Information Retrieval Symposium, pages 270-279. Springer.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Neural relation extraction with selective attention over instances", |
| "authors": [ |
| { |
| "first": "Yankai", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Shiqi", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiyuan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Huanbo", |
| "middle": [], |
| "last": "Luan", |
| "suffix": "" |
| }, |
| { |
| "first": "Maosong", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics, ACL 2016", |
| "volume": "1", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yankai Lin, Shiqi Shen, Zhiyuan Liu, Huanbo Luan, and Maosong Sun. 2016. Neural relation extraction with selective attention over instances. In Proceed- ings of the 54th Annual Meeting of the Association for Computational Linguistics, ACL 2016, August 7- 12, 2016, Berlin, Germany, Volume 1: Long Papers.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "A dependency-based neural network for relation classification", |
| "authors": [ |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Sujian", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Heng", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Houfeng", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing of the Asian Federation of Natural Language Processing", |
| "volume": "2", |
| "issue": "", |
| "pages": "285--290", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yang Liu, Furu Wei, Sujian Li, Heng Ji, Ming Zhou, and Houfeng Wang. 2015. A dependency-based neural network for relation classification. In Pro- ceedings of the 53rd Annual Meeting of the Associ- ation for Computational Linguistics and the 7th In- ternational Joint Conference on Natural Language Processing of the Asian Federation of Natural Lan- guage Processing, ACL 2015, July 26-31, 2015, Bei- jing, China, Volume 2: Short Papers, pages 285- 290.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Semi-supervised learning for relation extraction", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [ |
| "I" |
| ], |
| "last": "Zhou Guodong", |
| "suffix": "" |
| }, |
| { |
| "first": "Qian", |
| "middle": [], |
| "last": "Junhui", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhu", |
| "middle": [], |
| "last": "Longhua", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Qiaoming", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Third International Joint Conference on Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "ZHOU GuoDong LI JunHui QIAN LongHua and ZHU Qiaoming. 2008. Semi-supervised learning for re- lation extraction. In Third International Joint Con- ference on Natural Language Processing, page 32.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Visualizing data using t-sne", |
| "authors": [ |
| { |
| "first": "Laurens", |
| "middle": [], |
| "last": "van der Maaten", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "9", |
| "issue": "", |
| "pages": "2579--2605", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Laurens van der Maaten and Geoffrey Hinton. 2008. Visualizing data using t-sne. Journal of Machine Learning Research, 9(Nov):2579-2605.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Distributed representations of words and phrases and their compositionality", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [ |
| "S" |
| ], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "3111--3119", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Cor- rado, and Jeff Dean. 2013. Distributed representa- tions of words and phrases and their compositional- ity. In Advances in neural information processing systems, pages 3111-3119.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "End-to-end relation extraction using lstms on sequences and tree structures", |
| "authors": [ |
| { |
| "first": "Makoto", |
| "middle": [], |
| "last": "Miwa", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics, ACL 2016", |
| "volume": "1", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Makoto Miwa and Mohit Bansal. 2016. End-to-end re- lation extraction using lstms on sequences and tree structures. In Proceedings of the 54th Annual Meet- ing of the Association for Computational Linguis- tics, ACL 2016, August 7-12, 2016, Berlin, Ger- many, Volume 1: Long Papers.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Employing word representations and regularization for domain adaptation of relation extraction", |
| "authors": [ |
| { |
| "first": "Huu", |
| "middle": [], |
| "last": "Thien", |
| "suffix": "" |
| }, |
| { |
| "first": "Ralph", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Grishman", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "ACL (2)", |
| "volume": "", |
| "issue": "", |
| "pages": "68--74", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thien Huu Nguyen and Ralph Grishman. 2014. Em- ploying word representations and regularization for domain adaptation of relation extraction. In ACL (2), pages 68-74.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Relation extraction: Perspective from convolutional neural networks", |
| "authors": [ |
| { |
| "first": "Huu", |
| "middle": [], |
| "last": "Thien", |
| "suffix": "" |
| }, |
| { |
| "first": "Ralph", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Grishman", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "39--48", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thien Huu Nguyen and Ralph Grishman. 2015. Rela- tion extraction: Perspective from convolutional neu- ral networks. In Proceedings of NAACL-HLT, pages 39-48.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Big data small data, in domain out-of domain, known word unknown word: The impact of word representations on sequence labelling tasks", |
| "authors": [ |
| { |
| "first": "Lizhen", |
| "middle": [], |
| "last": "Qu", |
| "suffix": "" |
| }, |
| { |
| "first": "Gabriela", |
| "middle": [], |
| "last": "Ferraro", |
| "suffix": "" |
| }, |
| { |
| "first": "Liyuan", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Weiwei", |
| "middle": [], |
| "last": "Hou", |
| "suffix": "" |
| }, |
| { |
| "first": "Nathan", |
| "middle": [], |
| "last": "Schneider", |
| "suffix": "" |
| }, |
| { |
| "first": "Timothy", |
| "middle": [], |
| "last": "Baldwin", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 19th Conference on Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "83--93", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lizhen Qu, Gabriela Ferraro, Liyuan Zhou, Wei- wei Hou, Nathan Schneider, and Timothy Baldwin. 2015. Big data small data, in domain out-of domain, known word unknown word: The impact of word representations on sequence labelling tasks. In Pro- ceedings of the 19th Conference on Computational Natural Language Learning (CoNLL 2015), pages 83-93.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Relation extraction with matrix factorization and universal schemas", |
| "authors": [ |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Riedel", |
| "suffix": "" |
| }, |
| { |
| "first": "Limin", |
| "middle": [], |
| "last": "Yao", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "McCallum", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin M", |
| "middle": [], |
| "last": "Marlin", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sebastian Riedel, Limin Yao, Andrew McCallum, and Benjamin M Marlin. 2013. Relation extraction with matrix factorization and universal schemas.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Semantic compositionality through recursive matrix-vector spaces", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Brody", |
| "middle": [], |
| "last": "Huval", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "1201--1211", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Brody Huval, Christopher D. Man- ning, and Andrew Y. Ng. 2012. Semantic com- positionality through recursive matrix-vector spaces. In Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Process- ing and Computational Natural Language Learn- ing, EMNLP-CoNLL 2012, July 12-14, 2012, Jeju Island, Korea, pages 1201-1211.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Reasoning with neural tensor networks for knowledge base completion", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Advances in Neural Information Processing Systems 26: 27th Annual Conference on Neural Information Processing Systems 2013. Proceedings of a meeting held", |
| "volume": "", |
| "issue": "", |
| "pages": "926--934", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Danqi Chen, Christopher D. Manning, and Andrew Y. Ng. 2013. Reasoning with neural tensor networks for knowledge base completion. In Advances in Neural Information Processing Systems 26: 27th Annual Conference on Neural Information Processing Systems 2013. Proceedings of a meet- ing held December 5-8, 2013, Lake Tahoe, Nevada, United States., pages 926-934.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Active learning for relation type extension with local and global data views", |
| "authors": [ |
| { |
| "first": "Ang", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Ralph", |
| "middle": [], |
| "last": "Grishman", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 21st ACM international conference on Information and knowledge management", |
| "volume": "", |
| "issue": "", |
| "pages": "1105--1112", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ang Sun and Ralph Grishman. 2012. Active learn- ing for relation type extension with local and global data views. In Proceedings of the 21st ACM inter- national conference on Information and knowledge management, pages 1105-1112. ACM.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "A two-stage bootstrapping algorithm for relation extraction", |
| "authors": [ |
| { |
| "first": "Ang", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Recent Advances in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "14--16", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ang Sun. 2009. A two-stage bootstrapping algo- rithm for relation extraction. In Recent Advances in Natural Language Processing, RANLP 2009, 14-16", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Multi-instance multi-label learning for relation extraction", |
| "authors": [ |
| { |
| "first": "Mihai", |
| "middle": [], |
| "last": "Surdeanu", |
| "suffix": "" |
| }, |
| { |
| "first": "Julie", |
| "middle": [], |
| "last": "Tibshirani", |
| "suffix": "" |
| }, |
| { |
| "first": "Ramesh", |
| "middle": [], |
| "last": "Nallapati", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "455--465", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mihai Surdeanu, Julie Tibshirani, Ramesh Nallapati, and Christopher D Manning. 2012. Multi-instance multi-label learning for relation extraction. In Pro- ceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Com- putational Natural Language Learning, pages 455- 465. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Sequence to sequence learning with neural networks", |
| "authors": [ |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc V", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "3104--3112", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V Le. 2014. Sequence to sequence learning with neural net- works. In Advances in neural information process- ing systems, pages 3104-3112.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Representing text for joint embedding of text and knowledge bases", |
| "authors": [ |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Pantel", |
| "suffix": "" |
| }, |
| { |
| "first": "Hoifung", |
| "middle": [], |
| "last": "Poon", |
| "suffix": "" |
| }, |
| { |
| "first": "Pallavi", |
| "middle": [], |
| "last": "Choudhury", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Gamon", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1499--1509", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kristina Toutanova, Danqi Chen, Patrick Pantel, Hoi- fung Poon, Pallavi Choudhury, and Michael Gamon. 2015. Representing text for joint embedding of text and knowledge bases. In Proceedings of the 2015 Conference on Empirical Methods in Natural Lan- guage Processing, Lisbon, Portugal, September 17- 21, 2015, pages 1499-1509.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Stacked denoising autoencoders: Learning useful representations in a deep network with a local denoising criterion", |
| "authors": [ |
| { |
| "first": "Pascal", |
| "middle": [], |
| "last": "Vincent", |
| "suffix": "" |
| }, |
| { |
| "first": "Hugo", |
| "middle": [], |
| "last": "Larochelle", |
| "suffix": "" |
| }, |
| { |
| "first": "Isabelle", |
| "middle": [], |
| "last": "Lajoie", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierre-Antoine", |
| "middle": [], |
| "last": "Manzagol", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "11", |
| "issue": "", |
| "pages": "3371--3408", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pascal Vincent, Hugo Larochelle, Isabelle Lajoie, Yoshua Bengio, and Pierre-Antoine Manzagol. 2010. Stacked denoising autoencoders: Learning useful representations in a deep network with a local denoising criterion. Journal of Machine Learning Research, 11(Dec):3371-3408.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Show and tell: A neural image caption generator", |
| "authors": [ |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Toshev", |
| "suffix": "" |
| }, |
| { |
| "first": "Samy", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Dumitru", |
| "middle": [], |
| "last": "Erhan", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oriol Vinyals, Alexander Toshev, Samy Bengio, and Dumitru Erhan. 2014. Show and tell: A neural im- age caption generator. CoRR, abs/1411.4555.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Relation classification via multi-level attention cnns", |
| "authors": [ |
| { |
| "first": "Linlin", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhu", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Gerard", |
| "middle": [], |
| "last": "de Melo", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiyuan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics, ACL 2016", |
| "volume": "1", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Linlin Wang, Zhu Cao, Gerard de Melo, and Zhiyuan Liu. 2016. Relation classification via multi-level attention cnns. In Proceedings of the 54th Annual Meeting of the Association for Computational Lin- guistics, ACL 2016, August 7-12, 2016, Berlin, Ger- many, Volume 1: Long Papers.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Connecting language and knowledge bases with embedding models for relation extraction", |
| "authors": [ |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| }, |
| { |
| "first": "Oksana", |
| "middle": [], |
| "last": "Yakhnenko", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicolas", |
| "middle": [], |
| "last": "Usunier", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1307.7973" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jason Weston, Antoine Bordes, Oksana Yakhnenko, and Nicolas Usunier. 2013. Connecting language and knowledge bases with embedding models for re- lation extraction. arXiv preprint arXiv:1307.7973.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Classifying relations via long short term memory networks along shortest dependency paths", |
| "authors": [ |
| { |
| "first": "Yan", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Lili", |
| "middle": [], |
| "last": "Mou", |
| "suffix": "" |
| }, |
| { |
| "first": "Ge", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Yunchuan", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhi", |
| "middle": [], |
| "last": "Jin", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1785--1794", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yan Xu, Lili Mou, Ge Li, Yunchuan Chen, Hao Peng, and Zhi Jin. 2015. Classifying relations via long short term memory networks along shortest depen- dency paths. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Pro- cessing, EMNLP 2015, Lisbon, Portugal, September 17-21, 2015, pages 1785-1794.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Embedding entities and relations for learning and inference in knowledge bases", |
| "authors": [ |
| { |
| "first": "Bishan", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Wen-Tau", |
| "middle": [], |
| "last": "Yih", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1412.6575" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bishan Yang, Wen-tau Yih, Xiaodong He, Jianfeng Gao, and Li Deng. 2014. Embedding entities and relations for learning and inference in knowledge bases. arXiv preprint arXiv:1412.6575.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Combining word embeddings and feature embeddings for fine-grained relation extraction", |
| "authors": [ |
| { |
| "first": "Mo", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [ |
| "R" |
| ], |
| "last": "Gormley", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Dredze", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "The Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "1374--1379", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mo Yu, Matthew R. Gormley, and Mark Dredze. 2015. Combining word embeddings and feature embed- dings for fine-grained relation extraction. In The Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Denver, Colorado, USA, pages 1374-1379.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Relation classification via convolutional deep neural network", |
| "authors": [ |
| { |
| "first": "Daojian", |
| "middle": [], |
| "last": "Zeng", |
| "suffix": "" |
| }, |
| { |
| "first": "Kang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Siwei", |
| "middle": [], |
| "last": "Lai", |
| "suffix": "" |
| }, |
| { |
| "first": "Guangyou", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "COLING 2014, 25th International Conference on Computational Linguistics, Proceedings of the Conference: Technical Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "2335--2344", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daojian Zeng, Kang Liu, Siwei Lai, Guangyou Zhou, and Jun Zhao. 2014. Relation classification via convolutional deep neural network. In COLING 2014, 25th International Conference on Computa- tional Linguistics, Proceedings of the Conference: Technical Papers, August 23-29, 2014, Dublin, Ire- land, pages 2335-2344.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "Bidirectional long short-term memory networks for relation classification", |
| "authors": [ |
| { |
| "first": "Shu", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Dequan", |
| "middle": [], |
| "last": "Zheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Xinchen", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 29th Pacific Asia Conference on Language, Information and Computation", |
| "volume": "29", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shu Zhang, Dequan Zheng, Xinchen Hu, and Ming Yang. 2015. Bidirectional long short-term memory networks for relation classification. In Proceedings of the 29th Pacific Asia Conference on Language, In- formation and Computation, PACLIC 29, Shanghai, China, October 30 -November 1, 2015.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "uris": null, |
| "type_str": "figure", |
| "text": "General Architecture for deep RE models." |
| }, |
| "FIGREF1": { |
| "num": null, |
| "uris": null, |
| "type_str": "figure", |
| "text": "Deep relation extraction models." |
| }, |
| "FIGREF2": { |
| "num": null, |
| "uris": null, |
| "type_str": "figure", |
| "text": "Comparison between baselines, stepwise training of entity embeddings, and the pre-trained models. The error bars indicate standard deviation computed on all five experiments.(a) randomly initialised, before fine-tuning (b) randomly initialised, after fine-tuning (c) sequence reconstruction, before fine-tuning (d) sequence reconstruction, after fine-tuning Visualization of the relation expressions of the top 5 most frequent relations sampled from the development set. The representations of these expressions are generated by using h(x) of PCNN and further visualized by T-SNE. The top two figures are generated by randomly initialized PCNN , while the bottom ones are generated by PCNN pre-trained with SeqReconstruct. Different relations are marked with different colors." |
| }, |
| "FIGREF3": { |
| "num": null, |
| "uris": null, |
| "type_str": "figure", |
| "text": "Impact of the size of training data." |
| } |
| } |
| } |
| } |