| { |
| "paper_id": "E12-1043", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T10:36:20.054476Z" |
| }, |
| "title": "Combining Tree Structures, Flat Features and Patterns for Biomedical Relation Extraction", |
| "authors": [ |
| { |
| "first": "Md", |
| "middle": [], |
| "last": "Faisal", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Trento", |
| "location": { |
| "country": "Italy" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Mahbub", |
| "middle": [], |
| "last": "Chowdhury", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Trento", |
| "location": { |
| "country": "Italy" |
| } |
| }, |
| "email": "chowdhury@fbk.eu" |
| }, |
| { |
| "first": "Alberto", |
| "middle": [], |
| "last": "Lavelli", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Trento", |
| "location": { |
| "country": "Italy" |
| } |
| }, |
| "email": "lavelli@fbk.eu" |
| }, |
| { |
| "first": "\u2021", |
| "middle": [], |
| "last": "Fondazione", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Trento", |
| "location": { |
| "country": "Italy" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Bruno", |
| "middle": [], |
| "last": "Kessler", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Trento", |
| "location": { |
| "country": "Italy" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Kernel based methods dominate the current trend for various relation extraction tasks including protein-protein interaction (PPI) extraction. PPI information is critical in understanding biological processes. Despite considerable efforts, previously reported PPI extraction results show that none of the approaches already known in the literature is consistently better than other approaches when evaluated on different benchmark PPI corpora. In this paper, we propose a novel hybrid kernel that combines (automatically collected) dependency patterns, trigger words, negative cues, walk features and regular expression patterns along with tree kernel and shallow linguistic kernel. The proposed kernel outperforms the existing state-of-the-art approaches on the BioInfer corpus, the largest PPI benchmark corpus available. On the other four smaller benchmark corpora, it performs either better or almost as good as the existing approaches. Moreover, empirical results show that the proposed hybrid kernel attains considerably higher precision than the existing approaches, which indicates its capability of learning more accurate models. This also demonstrates that the different types of information that we use are able to complement each other for relation extraction.", |
| "pdf_parse": { |
| "paper_id": "E12-1043", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Kernel based methods dominate the current trend for various relation extraction tasks including protein-protein interaction (PPI) extraction. PPI information is critical in understanding biological processes. Despite considerable efforts, previously reported PPI extraction results show that none of the approaches already known in the literature is consistently better than other approaches when evaluated on different benchmark PPI corpora. In this paper, we propose a novel hybrid kernel that combines (automatically collected) dependency patterns, trigger words, negative cues, walk features and regular expression patterns along with tree kernel and shallow linguistic kernel. The proposed kernel outperforms the existing state-of-the-art approaches on the BioInfer corpus, the largest PPI benchmark corpus available. On the other four smaller benchmark corpora, it performs either better or almost as good as the existing approaches. Moreover, empirical results show that the proposed hybrid kernel attains considerably higher precision than the existing approaches, which indicates its capability of learning more accurate models. This also demonstrates that the different types of information that we use are able to complement each other for relation extraction.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Kernel methods are considered the most effective techniques for various relation extraction (RE) tasks on both general (e.g. newspaper text) and specialized (e.g. biomedical text) domains. In particular, as the importance of syntactic structures for deriving the relationships between entities in text has been growing, several graph and tree kernels have been designed and experimented.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Early RE approaches more or less fall in one of the following categories: (i) exploitation of statistics about co-occurrences of entities, (ii) usage of patterns and rules, and (iii) usage of flat features to train machine learning (ML) classifiers. These approaches have been studied for a long period and have their own pros and cons. Exploitation of co-occurrence statistics results in high recall but low precision, while rule or pattern based approaches can increase precision but suffer from low recall. Flat feature based ML approaches employ various kinds of linguistic, syntactic or contextual information and integrate them into the feature space. They obtain relatively good results but are hindered by drawbacks of limited feature space and excessive feature engineering. Kernel based approaches have become an attractive alternative solution, as they can exploit huge amount of features without an explicit representation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we propose a new hybrid kernel for RE. We apply the kernel to Protein-protein interaction (PPI) extraction, the most widely researched topic in biomedical relation extraction. PPI 1 information is very critical in understanding biological processes. Considerable progress has been made for this task. Nevertheless, empirical results of previous studies show that none of the approaches already known in the literature is consistently better than other approaches when evaluated on different benchmark PPI corpora (see Table 4 ). This demands further study and innovation of new approaches that are sensitive to the variations of complex linguistic constructions.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 533, |
| "end": 540, |
| "text": "Table 4", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The proposed hybrid kernel is the composition of one tree kernel and two feature based kernels (one of them is already known in the literature and the other is proposed in this paper for the first time). The novelty of the newly proposed feature based kernel is that it envisages to accommodate the advantages of pattern based approaches. More precisely:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "1. We propose a new feature based kernel (details in Section 4.1) by using syntactic dependency patterns, trigger words, negative cues, regular expression (henceforth, regex) patterns and walk features (i.e. e-walks and v-walks) 2 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "2. The syntactic dependency patterns are automatically collected from a type of dependency subgraph (we call it reduced graph, more details in Section 4.1.1) during runtime.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "3. We only use the regex patterns, trigger words and negative cues mentioned in the literature (Ono et al., 2001; Fundel et al., 2007; Bui et al., 2010) . The objective is to verify whether we can exploit knowledge which is already known and used.", |
| "cite_spans": [ |
| { |
| "start": 95, |
| "end": 113, |
| "text": "(Ono et al., 2001;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 114, |
| "end": 134, |
| "text": "Fundel et al., 2007;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 135, |
| "end": 152, |
| "text": "Bui et al., 2010)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "4. We propose a hybrid kernel by combining the proposed feature based kernel (outlined above) with the Shallow Linguistic (SL) kernel (Giuliano et al., 2006) and the Path-enclosed Tree (PET) kernel (Moschitti, 2004) .", |
| "cite_spans": [ |
| { |
| "start": 134, |
| "end": 157, |
| "text": "(Giuliano et al., 2006)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 198, |
| "end": 215, |
| "text": "(Moschitti, 2004)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The aim of our work is to take advantage of different types of information (i.e., dependency patterns, regex patterns, trigger words, negative cues, syntactic dependencies among words and constituent parse trees) and their different representations (i.e. flat features, tree structures and graphs) which can complement each other to learn more accurate models. 2 The syntactic dependencies of the words of a sentence create a dependency graph. A v-walk feature consists of (word_i \u2212 dependency type_{i,i+1} \u2212 word_{i+1}), and an e-walk feature is composed of (dependency type_{i\u22121,i} \u2212 word_i \u2212 dependency type_{i,i+1}). Note that, in a dependency graph, the words are nodes while the dependency types are edges.", |
| "cite_spans": [ |
| { |
| "start": 361, |
| "end": 362, |
| "text": "2", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The remainder of the paper is organized as follows. In Section 2, we briefly review previous work. Section 3 lists the datasets. Then, in Section 4, we define our proposed hybrid kernel and describe its individual component kernels. Section 5 outlines the experimental settings. Following that, empirical results are discussed in Section 6. Finally, we conclude with a summary of our study as well as suggestions for further improvement of our approach.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this section, we briefly discuss some of the recent work on PPI extraction. Several RE approaches have been reported to date for the PPI task, most of which are kernel based methods. Tikk et al. (2010) reported a benchmark evaluation of various kernels on PPI extraction. An interesting finding is that the Shallow Linguistic (SL) kernel (Giuliano et al., 2006 ) (to be discussed in Section 4.2), despite its simplicity, is on par with the best kernels in most of the evaluation settings. Kim et al. (2010) proposed walk-weighted subsequence kernel using e-walks, partial matches, non-contiguous paths, and different weights for different sub-structures (which are used to capture structural similarities during kernel computation). Miwa et al. (2009a) proposed a hybrid kernel, which combines the all-paths graph (APG) kernel , the bag-of-words kernel, and the subset tree kernel (Moschitti, 2006 ) (applied on the shortest dependency paths between target protein pairs). They used multiple parser inputs. The system is regarded as the current state-of-theart PPI extraction system because of its high results on different PPI corpora (see the results in Table 4 ).", |
| "cite_spans": [ |
| { |
| "start": 186, |
| "end": 204, |
| "text": "Tikk et al. (2010)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 341, |
| "end": 363, |
| "text": "(Giuliano et al., 2006", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 492, |
| "end": 509, |
| "text": "Kim et al. (2010)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 736, |
| "end": 755, |
| "text": "Miwa et al. (2009a)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 884, |
| "end": 900, |
| "text": "(Moschitti, 2006", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1159, |
| "end": 1166, |
| "text": "Table 4", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "As an extension of their work, they boosted system performance by training on multiple PPI corpora instead of on a single corpus and adopting a corpus weighting concept with support vector machine (SVM) which they call SVM-CW (Miwa et al., 2009b) . Since most of their results are reported by training on the combination of multiple corpora, it is not possible to compare them directly with the results published in the other related works (that usually adopt 10-fold cross validation on a single PPI corpus). To be comparable with the vast majority of the existing work, we also report results using 10-fold cross validation on single corpora. Apart from the approaches described above, there also exist other studies that used kernels for PPI extraction (e.g. subsequence kernel (Bunescu and Mooney, 2006) ).", |
| "cite_spans": [ |
| { |
| "start": 226, |
| "end": 246, |
| "text": "(Miwa et al., 2009b)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 781, |
| "end": 807, |
| "text": "(Bunescu and Mooney, 2006)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "A notable exception is the work published by Bui et al. (2010) . They proposed an approach that consists of two phases. In the first phase, their system categorizes the data into different groups (i.e. subsets) based on various properties and patterns. Later they classify candidate PPI pairs inside each of the groups using SVM trained with features specific for the corresponding group.", |
| "cite_spans": [ |
| { |
| "start": 45, |
| "end": 62, |
| "text": "Bui et al. (2010)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "There are 5 benchmark corpora for the PPI task that are frequently used: HPRD50 (Fundel et al., 2007) , IEPA (Ding et al., 2002) , LLL (N\u00e9dellec, 2005) , BioInfer (Pyysalo et al., 2007) and AIMed (Bunescu et al., 2005) . These corpora adopt different PPI annotation formats. For a comparative evaluation put all of them in a common format which has become the standard evaluation format for the PPI task. In our experiments, we use the versions of the corpora converted to such format. Table 1 shows various statistics regarding the 5 (converted) corpora.", |
| "cite_spans": [ |
| { |
| "start": 80, |
| "end": 101, |
| "text": "(Fundel et al., 2007)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 109, |
| "end": 128, |
| "text": "(Ding et al., 2002)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 135, |
| "end": 151, |
| "text": "(N\u00e9dellec, 2005)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 163, |
| "end": 185, |
| "text": "(Pyysalo et al., 2007)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 196, |
| "end": 218, |
| "text": "(Bunescu et al., 2005)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 486, |
| "end": 493, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The hybrid kernel that we propose is as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Hybrid Kernel", |
| "sec_num": "4" |
| }, |
| { |
| "text": "K_Hybrid(R_1, R_2) = K_TPWF(R_1, R_2) + K_SL(R_1, R_2) + w * K_PET(R_1, R_2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Hybrid Kernel", |
| "sec_num": "4" |
| }, |
| { |
| "text": "where K_TPWF stands for the new feature based kernel (henceforth, TPWF kernel) computed using flat features collected by exploiting patterns, trigger words, negative cues and walk features. K_SL and K_PET stand for the Shallow Linguistic (SL) kernel and the Path-enclosed Tree (PET) kernel respectively. w is a multiplicative constant used for the PET kernel. It allows the hybrid kernel to assign more (or less) weight to the information obtained using tree structures depending on the corpus. The proposed hybrid kernel is valid according to the closure properties of kernels.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Hybrid Kernel", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Both the TPWF and SL kernels are linear kernels, while PET kernel is computed using Unlexicalized Partial Tree (uPT) kernel (Severyn and Moschitti, 2010) . The following subsections explain each of the individual kernels in more detail.", |
| "cite_spans": [ |
| { |
| "start": 124, |
| "end": 153, |
| "text": "(Severyn and Moschitti, 2010)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Hybrid Kernel", |
| "sec_num": "4" |
| }, |
| { |
| "text": "For each of the candidate entity pairs, we construct a type of subgraph from the dependency graph formed by the syntactic dependencies among the words of a sentence. We call it \"reduced graph\" and define it in the following way:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reduced graph, trigger words, negative cues and dependency patterns", |
| "sec_num": "4.1.1" |
| }, |
| { |
| "text": "A reduced graph is a subgraph of the dependency graph of a sentence which includes:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reduced graph, trigger words, negative cues and dependency patterns", |
| "sec_num": "4.1.1" |
| }, |
| { |
| "text": "\u2022 the two candidate entities and their governor nodes up to their least common governor (if exists). \u2022 dependent nodes (if exist) of all the nodes added in the previous step. \u2022 the immediate governor(s) (if exists) of the least common governor. Figure 1 shows an example of a reduced graph. A reduced graph is an extension of the smallest common subgraph of the dependency graph that aims at overcoming its limitations. It is a known issue that the smallest common subgraph (or subtree) sometimes does not contain cue words. Previously, Chowdhury et al. (2011a) proposed a linguistically motivated extension of the minimal (i.e. smallest) common subtree (which includes the candidate entity pairs), known as Mildly Extended Dependency Tree (MEDT). However, the rules used for MEDT are too constrained. Our objective in constructing the reduced graph is to include any potential modifier(s) or cue word(s) that describes the relation between the given pair of entities. Sometimes such modifiers or cue words are not directly dependent (syntactically) on any Figure 1: Dependency graph for the sentence \"A pVHL mutant containing a P154L substitution does not promote degradation of HIF1-Alpha\" generated by the Stanford parser. The edges with blue dots form the smallest common subgraph for the candidate entity pair pVHL and HIF1-Alpha, while the edges with red dots form the reduced graph for the pair.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 245, |
| "end": 253, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Reduced graph, trigger words, negative cues and dependency patterns", |
| "sec_num": "4.1.1" |
| }, |
| { |
| "text": "of the entities (of the candidate pair). Rather they are dependent on some other word(s) which is dependent on one (or both) of the entities. The word \"not\" in Figure 1 is one such example. The reduced graph aims to preserve these cue words. The following types of features are collected from the reduced graph of a candidate pair: 1. HasTriggerWord: whether the least common governor(s) of the target entity pairs inside the reduced graph matches any trigger word.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 160, |
| "end": 168, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Reduced graph, trigger words, negative cues and dependency patterns", |
| "sec_num": "4.1.1" |
| }, |
| { |
| "text": "2. Trigger-X: whether the least common governor(s) of the target entity pairs inside the reduced graph matches the trigger word 'X'.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reduced graph, trigger words, negative cues and dependency patterns", |
| "sec_num": "4.1.1" |
| }, |
| { |
| "text": "3. HasNegWord: whether the reduced graph contains any negative word.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reduced graph, trigger words, negative cues and dependency patterns", |
| "sec_num": "4.1.1" |
| }, |
| { |
| "text": "4. DepPattern-i: whether the reduced graph contains all the syntactic dependencies of the i-th pattern of dependency pattern list.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reduced graph, trigger words, negative cues and dependency patterns", |
| "sec_num": "4.1.1" |
| }, |
| { |
| "text": "The dependency pattern list is automatically constructed from the training data during the learning phase. Each pattern is a set of syntactic dependencies of the corresponding reduced graph of a (positive or negative) entity pair in the training data. For example, the dependency pattern for the reduced graph in Figure 1 is {det, amod, partmod, nsubj, aux, neg, dobj, prep_of}. The same dependency pattern might be constructed for multiple (positive or negative) entity pairs. However, if it is constructed for both positive and negative pairs, it has to be discarded from the pattern list.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 313, |
| "end": 321, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Reduced graph, trigger words, negative cues and dependency patterns", |
| "sec_num": "4.1.1" |
| }, |
| { |
| "text": "The dependency patterns allow some kind of underspecification as they do not contain the lexical items (i.e. words) but contain the likely combination of syntactic dependencies that a given related pair of entities would pose inside their reduced graph.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reduced graph, trigger words, negative cues and dependency patterns", |
| "sec_num": "4.1.1" |
| }, |
| { |
| "text": "The list of trigger words contains 144 words previously used by Bui et al. (2010) and Fundel et al. (2007) . The list of negative cues contain 18 words, most of which are mentioned in Fundel et al. (2007) .", |
| "cite_spans": [ |
| { |
| "start": 64, |
| "end": 81, |
| "text": "Bui et al. (2010)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 86, |
| "end": 106, |
| "text": "Fundel et al. (2007)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 184, |
| "end": 204, |
| "text": "Fundel et al. (2007)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reduced graph, trigger words, negative cues and dependency patterns", |
| "sec_num": "4.1.1" |
| }, |
| { |
| "text": "We extract e-walk and v-walk features from the Mildly Extended Dependency Tree (MEDT) (Chowdhury et al., 2011a) of each candidate pair. Reduced graphs sometimes include some unin- Table 3 : Results of the proposed hybrid kernel and its individual components. Pos. and Neg. refer to the number of positive and negative relations respectively. PET refers to the path-enclosed tree kernel, SL refers to the shallow linguistic kernel, and TPWF refers to the kernel computed using trigger, pattern, negative cue and walk features.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 180, |
| "end": 187, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Walk features", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "formative words which produce uninformative walk features. Hence, they are not suitable for walk feature generation. MEDT suits better for this purpose. The walk features extracted from MEDTs have the following properties:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Walk features", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "\u2022 The directionality of the edges (or nodes) in an e-walk (or v-walk) is not considered. In other words, e.g., pos(stimulatory) \u2212 amod \u2212 pos(effects) and pos(effects) \u2212 amod \u2212 pos(stimulatory) are treated as the same feature.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Walk features", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "\u2022 The v-walk features are of the form (pos_i \u2212 dependency type_{i,i+1} \u2212 pos_{i+1}). Here, pos_i is the POS tag of word_i, i is the governor node and i + 1 is the dependent node.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Walk features", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "\u2022 The e-walk features are of the form", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Walk features", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "(dep. type_{i\u22121,i} \u2212 pos_i \u2212 dep. type_{i,i+1}) and (dep. type_{i\u22121,i} \u2212 lemma_i \u2212 dep. type_{i,i+1}).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Walk features", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "Here, lemma_i is the lemmatized form of word_i.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Walk features", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "\u2022 Usually, the e-walk features are constructed using dependency types between {governor of X, node X} and {node X, dependent of X}.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Walk features", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "However, we also extract e-walk features from the dependency types between any two dependents and their common governor (i.e.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Walk features", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "{node X, dependent 1 of X} and {node X, dependent 2 of X}).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Walk features", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "Apart from the above types of features, we also add features for lemmas of the immediate preceding and following words of the candidate entities. These feature names are augmented with -1 or +1 depending on whether the corresponding words are preceded or followed by a candidate entity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Walk features", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "We use a set of 22 regex patterns as binary features. These patterns were previously used by Ono et al. (2001) and Bui et al. (2010) . If there is a match for a pattern (e.g. \"Entity 1.*activates.*Entity 2\" where Entity 1 and Entity 2 form the candidate entity pair) in a given sentence, value 1 is added for the feature (i.e., pattern) inside the feature vector.", |
| "cite_spans": [ |
| { |
| "start": 93, |
| "end": 110, |
| "text": "Ono et al. (2001)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 115, |
| "end": 132, |
| "text": "Bui et al. (2010)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Regular expression patterns", |
| "sec_num": "4.1.3" |
| }, |
| { |
| "text": "The Shallow Linguistic (SL) kernel was proposed by Giuliano et al. (2006) . It is one of the best performing kernels applied on different biomedical RE tasks such as PPI and DDI (drug-drug interaction) extraction (Tikk et al., 2010; Segura-Bedmar et al., 2011; Chowdhury and Lavelli, 2011b; Chowdhury et al., 2011c) . It is defined as follows: Miwa et al. (2009b) showed that better results can be obtained using multiple corpora for training. However, we consider only those results of their experiments where they used single training corpus as it is the standard evaluation approach adopted by all the other studies on PPI extraction for comparing results. All the results of the previous approaches reported in this table are directly quoted from their respective original papers.", |
| "cite_spans": [ |
| { |
| "start": 51, |
| "end": 73, |
| "text": "Giuliano et al. (2006)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 213, |
| "end": 232, |
| "text": "(Tikk et al., 2010;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 233, |
| "end": 260, |
| "text": "Segura-Bedmar et al., 2011;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 261, |
| "end": 290, |
| "text": "Chowdhury and Lavelli, 2011b;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 291, |
| "end": 315, |
| "text": "Chowdhury et al., 2011c)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 344, |
| "end": 363, |
| "text": "Miwa et al. (2009b)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Shallow Linguistic (SL) Kernel", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "K_SL(R_1, R_2) = K_LC(R_1, R_2) + K_GC(R_1, R_2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Shallow Linguistic (SL) Kernel", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "where K_SL, K_GC and K_LC correspond to SL, global context (GC) and local context (LC) kernels respectively. The GC kernel exploits contextual information of the words occurring before, between and after the pair of entities (to be investigated for RE) in the corresponding sentence; while the LC kernel exploits contextual information surrounding individual entities.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Shallow Linguistic (SL) Kernel", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The path-enclosed tree (PET) kernel 3 was first proposed by Moschitti (2004) for semantic role labeling. It was later successfully adapted by Zhang et al. (2005) and other works for relation extraction on general texts (such as newspaper do-3 Also known as shortest path-enclosed tree (SPT) kernel. main). A PET is the smallest common subtree of a phrase structure tree that includes the two entities involved in a relation.", |
| "cite_spans": [ |
| { |
| "start": 60, |
| "end": 76, |
| "text": "Moschitti (2004)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 142, |
| "end": 161, |
| "text": "Zhang et al. (2005)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Path-enclosed tree (PET) Kernel", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "A tree kernel calculates the similarity between two input trees by counting the number of common sub-structures. Different techniques have been proposed to measure such similarity. We use the Unlexicalized Partial Tree (uPT) kernel (Severyn and Moschitti, 2010) for the computation of the PET kernel since a comparative evaluation by Chowdhury et al. (2011a) reported that uPT kernels achieve better results for PPI extraction than the other techniques used for tree kernel computation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Path-enclosed tree (PET) Kernel", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We have followed the same criteria commonly used for the PPI extraction tasks, i.e. abstractwise 10-fold cross validation on individual corpus and one-answer-per-occurrence criterion. In fact, we have used exactly the same (abstract-wise) fold splitting of the 5 benchmark (converted) corpora used by Tikk et al. (2010) for benchmarking various kernel methods 4 .", |
| "cite_spans": [ |
| { |
| "start": 301, |
| "end": 319, |
| "text": "Tikk et al. (2010)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Settings", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The Charniak-Johnson reranking parser (Charniak and Johnson, 2005) , along with a self-trained biomedical parsing model (McClosky, 2010) , has been used for tokenization, POS-tagging and parsing of the sentences. Before parsing the sentences, all the entities are blinded by assigning names as EntityX where X is the entity index. In each example, the POS tags of the two candidate entities are changed to EntityX. The parse trees produced by the Charniak-Johnson reranking parser are then processed by the Stanford parser 5 (Klein and Manning, 2003) to obtain syntactic dependencies according to the Stanford Typed Dependency format.", |
| "cite_spans": [ |
| { |
| "start": 4, |
| "end": 66, |
| "text": "Charniak-Johnson reranking parser (Charniak and Johnson, 2005)", |
| "ref_id": null |
| }, |
| { |
| "start": 120, |
| "end": 136, |
| "text": "(McClosky, 2010)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 525, |
| "end": 550, |
| "text": "(Klein and Manning, 2003)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Settings", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The Stanford parser often skips some syntactic dependencies in output. We use the following two rules to add some of such dependencies:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Settings", |
| "sec_num": "5" |
| }, |
| { |
| "text": "\u2022 If there is a \"conj and\" or \"conj or\" dependency between two words X and Y, then X should be dependent on any word Z on which Y is dependent and vice versa.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Settings", |
| "sec_num": "5" |
| }, |
| { |
| "text": "\u2022 If there are two verbs X and Y such that inside the corresponding sentence they have only the word \"and\" or \"or\" between them, then any word Z dependent on X should be also dependent on Y and vice versa.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Settings", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Our system exploits SVM-LIGHT-TK 6 (Moschitti, 2006; Joachims, 1999) . We made minor changes in the toolkit to compute the proposed hybrid kernel. The ratio of negative and positive examples has been used as the value of the costratio-factor parameter. We have done parameter tuning following the approach described by Hsu et al. (2003) . 4 Downloaded from http://informatik.hu-berlin.de/forschung/gebiete/wbi/ppi-benchmark .", |
| "cite_spans": [ |
| { |
| "start": 35, |
| "end": 52, |
| "text": "(Moschitti, 2006;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 53, |
| "end": 68, |
| "text": "Joachims, 1999)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 319, |
| "end": 336, |
| "text": "Hsu et al. (2003)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 339, |
| "end": 340, |
| "text": "4", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Settings", |
| "sec_num": "5" |
| }, |
| { |
| "text": "5 http://nlp.stanford.edu/software/lex-parser.shtml 6 http://disi.unitn.it/moschitti/Tree-Kernel.htm", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Settings", |
| "sec_num": "5" |
| }, |
| { |
| "text": "To measure the contribution of the features collected from the reduced graphs (using dependency patterns, trigger words and negative cues) and regex patterns, we have applied the new TPWF kernel on the 5 PPI corpora before and after using these features. Results shown in Table 2 clearly indicate that usage of these features improves the performance. The improvement of performance is primarily due to the usage of dependency patterns which resulted in higher precision for all the corpora.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 272, |
| "end": 279, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We have tried to measure the contribution of the regex patterns. However, from the empirical results a clear trend does not emerge (see Table 2 ). Table 3 shows a comparison among the results of the proposed hybrid kernel and its individual components. As we can see, the overall results of the hybrid kernel (with and without using regex pattern features) are better than those by any of its individual component kernels. Interestingly, precision achieved on the 4 benchmark corpora (other than the smallest corpus LLL) is much higher for the hybrid kernel than for the individual components. This strongly indicates that these different types of information (i.e. dependency patterns, regex patterns, triggers, negative cues, syntactic dependencies among words and constituent parse trees) and their different representations (i.e. flat features, tree structures and graphs) can complement each other to learn more accurate models. Table 4 shows a comparison of the PPI extraction results of our proposed hybrid kernel with those of other state-of-the-art approaches. Since the contribution of regex patterns in the performance of the hybrid kernel was not relevant (as Tables 2 and 3 show), we used the results of the proposed hybrid kernel without regex for the comparison. As we can see, the proposed kernel achieves significantly higher results on the BioInfer corpus, the largest benchmark PPI corpus (2,534 positive PPI pair annotations) available, than any of the existing approaches. Moreover, the results of the proposed hybrid kernel are on par with the state-of-the-art results on the other smaller corpora.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 136, |
| "end": 144, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 148, |
| "end": 155, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 935, |
| "end": 942, |
| "text": "Table 4", |
| "ref_id": "TABREF6" |
| }, |
| { |
| "start": 1173, |
| "end": 1187, |
| "text": "Tables 2 and 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Furthermore, empirical results show that the proposed hybrid kernel attains considerably higher precision than the existing approaches.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Since a dependency pattern, by construction, contains all the syntactic dependencies inside the corresponding reduced graph, it may happen that some of the dependencies (e.g. det or determiner) are not informative for classifying the corresponding class label (i.e., positive or negative relation) of the pattern. Their presence inside a pattern might make it unnecessarily rigid and less general. So, we tried to identify and discard such non informative dependencies by measuring probabilities of the dependencies with respect to the class label and then removing any of them which has probability lower than a threshold (we tried with different threshold values). But doing so decreased the performance. This suggests that the syntactic dependencies of a dependency pattern are not independent of each other even if some of them might have low probability (with respect to the class label) individually. We plan to further investigate whether there could be different criteria for identifying non informative dependencies. For the work reported in this paper, we used the dependency patterns as they are initially constructed.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We also did experiments to see whether collecting features for trigger words from the whole reduced graph would help. But that also decreased performance. This suggests that trigger words are more likely to appear in the least common governors.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In this paper, we have proposed a new hybrid kernel for RE that combines two vector based kernels and a tree kernel. The proposed kernel outperforms any of the existing approaches by a wide margin on the BioInfer corpus, the largest PPI benchmark corpus available. On the other four smaller benchmark corpora, it performs either better or almost as good as the existing state-of-the-art approaches.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "We have also proposed a novel feature based kernel, called TPWF kernel, using (automatically collected) dependency patterns, trigger words, negative cues, walk features and regular expression patterns. The TPWF kernel is used as a component of the new hybrid kernel.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Empirical results show that the proposed hybrid kernel achieves considerably higher precision than the existing approaches, which indicates its capability of learning more accurate models. This also demonstrates that the different types of information that we use are able to complement each other for relation extraction.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "We believe there are at least three ways to further improve the proposed approach. First of all, the 22 regular expression patterns (collected from Ono et al. (2001) and Bui et al. (2010) ) are applied at the level of the sentences and this sometimes produces unwanted matches. For example, consider the sentence \"X activates Y and inhibits Z\" where X, Y, and Z are entities. The pattern \"Entity1. * activates. * Entity2\" matches both the X-Y and X-Z pairs in the sentence. But only the X-Y pair should be considered. So, the patterns should be constrained to reduce the number of unwanted matches. For example, they could be applied on smaller linguistic units than full sentences. Secondly, different techniques could be used to identify less-informative syntactic dependencies inside dependency patterns to make them more accurate and effective. Thirdly, usage of automatically collected paraphrases of regular expression patterns instead of the patterns directly could be also helpful. Weakly supervised collection of paraphrases for RE has been already investigated (e.g. ) and, hence, can be tried for improving the TPWF kernel (which is a component of the proposed hybrid kernel).", |
| "cite_spans": [ |
| { |
| "start": 148, |
| "end": 165, |
| "text": "Ono et al. (2001)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 170, |
| "end": 187, |
| "text": "Bui et al. (2010)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "PPIs occur when two or more proteins bind together, and are integral to virtually all cellular processes, such as metabolism, signalling, regulation, and proliferation(Tikk et al., 2010).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work was carried out in the context of the project \"eOnco -Pervasive knowledge and data management in cancer care\". The authors are grateful to Alessandro Moschitti for his help in the use of SVM-LIGHT-TK. We also thank the anonymous reviewers for helpful suggestions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "All-paths graph kernel for protein-protein interaction extraction with evaluation of cross-corpus learning", |
| "authors": [ |
| { |
| "first": "Antti", |
| "middle": [], |
| "last": "Airola", |
| "suffix": "" |
| }, |
| { |
| "first": "Sampo", |
| "middle": [], |
| "last": "Pyysalo", |
| "suffix": "" |
| }, |
| { |
| "first": "Jari", |
| "middle": [], |
| "last": "Bjorne", |
| "suffix": "" |
| }, |
| { |
| "first": "Tapio", |
| "middle": [], |
| "last": "Pahikkala", |
| "suffix": "" |
| }, |
| { |
| "first": "Filip", |
| "middle": [], |
| "last": "Ginter", |
| "suffix": "" |
| }, |
| { |
| "first": "Tapio", |
| "middle": [], |
| "last": "Salakoski", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "BMC Bioinformatics", |
| "volume": "9", |
| "issue": "11", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Antti Airola, Sampo Pyysalo, Jari Bjorne, Tapio Pahikkala, Filip Ginter, and Tapio Salakoski. 2008. All-paths graph kernel for protein-protein inter- action extraction with evaluation of cross-corpus learning. BMC Bioinformatics, 9(Suppl 11):S2.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "A hybrid approach to extract protein-protein interactions", |
| "authors": [ |
| { |
| "first": "Quoc-Chinh", |
| "middle": [], |
| "last": "Bui", |
| "suffix": "" |
| }, |
| { |
| "first": "Sophia", |
| "middle": [], |
| "last": "Katrenko", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "M A" |
| ], |
| "last": "Sloot", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Bioinformatics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Quoc-Chinh Bui, Sophia Katrenko, and Peter M.A. Sloot. 2010. A hybrid approach to extract protein- protein interactions. Bioinformatics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Subsequence kernels for relation extraction", |
| "authors": [ |
| { |
| "first": "Razvan", |
| "middle": [], |
| "last": "Bunescu", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "J" |
| ], |
| "last": "Mooney", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of NIPS 2006", |
| "volume": "", |
| "issue": "", |
| "pages": "171--178", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Razvan Bunescu and Raymond J. Mooney. 2006. Subsequence kernels for relation extraction. In Pro- ceedings of NIPS 2006, pages 171-178.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Comparative experiments on learning information extractors for proteins and their interactions", |
| "authors": [ |
| { |
| "first": "Razvan", |
| "middle": [], |
| "last": "Bunescu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruifang", |
| "middle": [], |
| "last": "Ge", |
| "suffix": "" |
| }, |
| { |
| "first": "Rohit", |
| "middle": [ |
| "J" |
| ], |
| "last": "Kate", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [ |
| "M" |
| ], |
| "last": "Marcotte", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "J" |
| ], |
| "last": "Mooney", |
| "suffix": "" |
| }, |
| { |
| "first": "Arun", |
| "middle": [], |
| "last": "Kumar Ramani", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuk", |
| "middle": [ |
| "Wah" |
| ], |
| "last": "Wong", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Artificial Intelligence in Medicine", |
| "volume": "33", |
| "issue": "2", |
| "pages": "139--155", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Razvan Bunescu, Ruifang Ge, Rohit J. Kate, Ed- ward M. Marcotte, Raymond J. Mooney, Arun Ku- mar Ramani, and Yuk Wah Wong. 2005. Compara- tive experiments on learning information extractors for proteins and their interactions. Artificial Intelli- gence in Medicine, 33(2):139-155.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Coarseto-fine n-best parsing and maxent discriminative reranking", |
| "authors": [ |
| { |
| "first": "Eugene", |
| "middle": [], |
| "last": "Charniak", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of ACL 2005", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eugene Charniak and Mark Johnson. 2005. Coarse- to-fine n-best parsing and maxent discriminative reranking. In Proceedings of ACL 2005.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Drug-drug interaction extraction using composite kernels", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Md", |
| "suffix": "" |
| }, |
| { |
| "first": "Alberto", |
| "middle": [], |
| "last": "Mahbub Chowdhury", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Lavelli", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of DDIExtrac-tion2011: First Challenge Task: Drug-Drug Interaction Extraction", |
| "volume": "", |
| "issue": "", |
| "pages": "27--33", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Md. Faisal Mahbub Chowdhury and Alberto Lavelli. 2011b. Drug-drug interaction extraction using com- posite kernels. In Proceedings of DDIExtrac- tion2011: First Challenge Task: Drug-Drug In- teraction Extraction, pages 27-33, Huelva, Spain, September.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "A study on dependency tree kernels for automatic extraction of protein-protein interaction", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Md", |
| "suffix": "" |
| }, |
| { |
| "first": "Alberto", |
| "middle": [], |
| "last": "Mahbub Chowdhury", |
| "suffix": "" |
| }, |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Lavelli", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Moschitti", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of BioNLP 2011 Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "124--133", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Md. Faisal Mahbub Chowdhury, Alberto Lavelli, and Alessandro Moschitti. 2011a. A study on de- pendency tree kernels for automatic extraction of protein-protein interaction. In Proceedings of BioNLP 2011 Workshop, pages 124-133, Portland, Oregon, USA, June.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Two different machine learning techniques for drug-drug interaction extraction", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Md", |
| "suffix": "" |
| }, |
| { |
| "first": "Asma", |
| "middle": [], |
| "last": "Mahbub Chowdhury", |
| "suffix": "" |
| }, |
| { |
| "first": "Alberto", |
| "middle": [], |
| "last": "Ben Abacha", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierre", |
| "middle": [], |
| "last": "Lavelli", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zweigenbaum", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of DDIExtraction2011: First Challenge Task: Drug-Drug Interaction Extraction", |
| "volume": "", |
| "issue": "", |
| "pages": "19--26", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Md. Faisal Mahbub Chowdhury, Asma Ben Abacha, Alberto Lavelli, and Pierre Zweigenbaum. 2011c. Two dierent machine learning techniques for drug- drug interaction extraction. In Proceedings of DDIExtraction2011: First Challenge Task: Drug- Drug Interaction Extraction, pages 19-26, Huelva, Spain, September.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Mining MEDLINE: abstracts, sentences, or phrases? Pacific Symposium on Biocomputing", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Ding", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Berleant", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Nettleton", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Wurtele", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "326--337", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. Ding, D. Berleant, D. Nettleton, and E. Wurtele. 2002. Mining MEDLINE: abstracts, sentences, or phrases? Pacific Symposium on Biocomputing, pages 326-337.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Relex-relation extraction using dependency parse trees", |
| "authors": [ |
| { |
| "first": "Katrin", |
| "middle": [], |
| "last": "Fundel", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "K\u00fcffner", |
| "suffix": "" |
| }, |
| { |
| "first": "Ralf", |
| "middle": [], |
| "last": "Zimmer", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Bioinformatics", |
| "volume": "23", |
| "issue": "3", |
| "pages": "365--371", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Katrin Fundel, Robert K\u00fcffner, and Ralf Zimmer. 2007. Relex-relation extraction using dependency parse trees. Bioinformatics, 23(3):365-371.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Exploiting shallow linguistic information for relation extraction from biomedical literature", |
| "authors": [ |
| { |
| "first": "Claudio", |
| "middle": [], |
| "last": "Giuliano", |
| "suffix": "" |
| }, |
| { |
| "first": "Alberto", |
| "middle": [], |
| "last": "Lavelli", |
| "suffix": "" |
| }, |
| { |
| "first": "Lorenza", |
| "middle": [], |
| "last": "Romano", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of EACL 2006", |
| "volume": "", |
| "issue": "", |
| "pages": "401--408", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Claudio Giuliano, Alberto Lavelli, and Lorenza Ro- mano. 2006. Exploiting shallow linguistic infor- mation for relation extraction from biomedical lit- erature. In Proceedings of EACL 2006, pages 401- 408.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "A practical guide to support vector classification", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [ |
| "W" |
| ], |
| "last": "Hsu", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "J" |
| ], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "CW Hsu, CC Chang, and CJ Lin, 2003. A practical guide to support vector classification. Department of Computer Science and Information Engineering, National Taiwan University, Taipei, Taiwan.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Making large-scale support vector machine learning practical", |
| "authors": [ |
| { |
| "first": "Thorsten", |
| "middle": [], |
| "last": "Joachims", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Advances in kernel methods: support vector learning", |
| "volume": "", |
| "issue": "", |
| "pages": "169--184", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thorsten Joachims. 1999. Making large-scale sup- port vector machine learning practical. In Advances in kernel methods: support vector learning, pages 169-184. MIT Press, Cambridge, MA, USA.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Walk-weighted subsequence kernels for protein-protein interaction extraction", |
| "authors": [ |
| { |
| "first": "Seonho", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Juntae", |
| "middle": [], |
| "last": "Yoon", |
| "suffix": "" |
| }, |
| { |
| "first": "Jihoon", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Seog", |
| "middle": [], |
| "last": "Park", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "BMC Bioinformatics", |
| "volume": "", |
| "issue": "1", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Seonho Kim, Juntae Yoon, Jihoon Yang, and Seog Park. 2010. Walk-weighted subsequence kernels for protein-protein interaction extraction. BMC Bioinformatics, 11(1).", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Accurate unlexicalized parsing", |
| "authors": [ |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of ACL 2003", |
| "volume": "", |
| "issue": "", |
| "pages": "423--430", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dan Klein and Christopher D. Manning. 2003. Accu- rate unlexicalized parsing. In Proceedings of ACL 2003, pages 423-430, Sapporo, Japan.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Any Domain Parsing: Automatic Domain Adaptation for Natural Language Parsing", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Mcclosky", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David McClosky. 2010. Any Domain Parsing: Au- tomatic Domain Adaptation for Natural Language Parsing. Ph.D. thesis, Department of Computer Science, Brown University.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Protein-protein interaction extraction by leveraging multiple kernels and parsers", |
| "authors": [ |
| { |
| "first": "Makoto", |
| "middle": [], |
| "last": "Miwa", |
| "suffix": "" |
| }, |
| { |
| "first": "Rune", |
| "middle": [], |
| "last": "Saetre", |
| "suffix": "" |
| }, |
| { |
| "first": "Yusuke", |
| "middle": [], |
| "last": "Miyao", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun'ichi", |
| "middle": [], |
| "last": "Tsujii", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "International Journal of Medical Informatics", |
| "volume": "78", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Makoto Miwa, Rune Saetre, Yusuke Miyao, and Jun'ichi Tsujii. 2009a. Protein-protein interac- tion extraction by leveraging multiple kernels and parsers. International Journal of Medical Informat- ics, 78.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "A rich feature vector for protein-protein interaction extraction from multiple corpora", |
| "authors": [ |
| { |
| "first": "Makoto", |
| "middle": [], |
| "last": "Miwa", |
| "suffix": "" |
| }, |
| { |
| "first": "Rune", |
| "middle": [], |
| "last": "Saetre", |
| "suffix": "" |
| }, |
| { |
| "first": "Yusuke", |
| "middle": [], |
| "last": "Miyao", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun'ichi", |
| "middle": [], |
| "last": "Tsujii", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of EMNLP 2009", |
| "volume": "", |
| "issue": "", |
| "pages": "121--130", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Makoto Miwa, Rune Saetre, Yusuke Miyao, and Jun'ichi Tsujii. 2009b. A rich feature vector for protein-protein interaction extraction from multiple corpora. In Proceedings of EMNLP 2009, pages 121-130, Singapore.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "A study on convolution kernels for shallow semantic parsing", |
| "authors": [ |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Moschitti", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alessandro Moschitti. 2004. A study on convolution kernels for shallow semantic parsing. In Proceed- ings of ACL 2004, Barcelona, Spain.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Making Tree Kernels Practical for Natural Language Learning", |
| "authors": [ |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Moschitti", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of EACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alessandro Moschitti. 2006. Making Tree Kernels Practical for Natural Language Learning. In Pro- ceedings of EACL 2006, Trento, Italy.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Learning language in logic - genic interaction extraction challenge", |
| "authors": [ |
| { |
| "first": "Claire", |
| "middle": [], |
| "last": "N\u00e9dellec", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the ICML 2005 workshop: Learning Language in Logic (LLL05)", |
| "volume": "", |
| "issue": "", |
| "pages": "31--37", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Claire N\u00e9dellec. 2005. Learning language in logic - genic interaction extraction challenge. Proceedings of the ICML 2005 workshop: Learning Language in Logic (LLL05), pages 31-37.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Automated extraction of information on protein-protein interactions from the biological literature", |
| "authors": [ |
| { |
| "first": "Toshihide", |
| "middle": [], |
| "last": "Ono", |
| "suffix": "" |
| }, |
| { |
| "first": "Haretsugu", |
| "middle": [], |
| "last": "Hishigaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Akira", |
| "middle": [], |
| "last": "Tanigami", |
| "suffix": "" |
| }, |
| { |
| "first": "Toshihisa", |
| "middle": [], |
| "last": "Takagi", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Bioinformatics", |
| "volume": "17", |
| "issue": "2", |
| "pages": "155--161", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Toshihide Ono, Haretsugu Hishigaki, Akira Tanigami, and Toshihisa Takagi. 2001. Automated ex- traction of information on protein-protein interac- tions from the biological literature. Bioinformatics, 17(2):155-161.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Bioinfer: a corpus for information extraction in the biomedical domain", |
| "authors": [ |
| { |
| "first": "Sampo", |
| "middle": [], |
| "last": "Pyysalo", |
| "suffix": "" |
| }, |
| { |
| "first": "Filip", |
| "middle": [], |
| "last": "Ginter", |
| "suffix": "" |
| }, |
| { |
| "first": "Juho", |
| "middle": [], |
| "last": "Heimonen", |
| "suffix": "" |
| }, |
| { |
| "first": "Jari", |
| "middle": [], |
| "last": "Bj\u00f6rne", |
| "suffix": "" |
| }, |
| { |
| "first": "Jorma", |
| "middle": [], |
| "last": "Boberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Jouni", |
| "middle": [], |
| "last": "Jarvinen", |
| "suffix": "" |
| }, |
| { |
| "first": "Tapio", |
| "middle": [], |
| "last": "Salakoski", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "BMC Bioinformatics", |
| "volume": "8", |
| "issue": "1", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sampo Pyysalo, Filip Ginter, Juho Heimonen, Jari Bj\u00f6rne, Jorma Boberg, Jouni Jarvinen, and Tapio Salakoski. 2007. Bioinfer: a corpus for information extraction in the biomedical domain. BMC Bioin- formatics, 8(1):50.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Comparative analysis of five protein-protein interaction corpora", |
| "authors": [ |
| { |
| "first": "Sampo", |
| "middle": [], |
| "last": "Pyysalo", |
| "suffix": "" |
| }, |
| { |
| "first": "Antti", |
| "middle": [], |
| "last": "Airola", |
| "suffix": "" |
| }, |
| { |
| "first": "Juho", |
| "middle": [], |
| "last": "Heimonen", |
| "suffix": "" |
| }, |
| { |
| "first": "Jari", |
| "middle": [], |
| "last": "Bj\u00f6rne", |
| "suffix": "" |
| }, |
| { |
| "first": "Filip", |
| "middle": [], |
| "last": "Ginter", |
| "suffix": "" |
| }, |
| { |
| "first": "Tapio", |
| "middle": [], |
| "last": "Salakoski", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "BMC Bioinformatics", |
| "volume": "9", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sampo Pyysalo, Antti Airola, Juho Heimonen, Jari Bj\u00f6rne, Filip Ginter, and Tapio Salakoski. 2008. Comparative analysis of five protein-protein in- teraction corpora. BMC Bioinformatics, 9(Suppl 3):S6.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Investigating a generic paraphrase-based approach for relation extraction", |
| "authors": [ |
| { |
| "first": "Lorenza", |
| "middle": [], |
| "last": "Romano", |
| "suffix": "" |
| }, |
| { |
| "first": "Milen", |
| "middle": [], |
| "last": "Kouylekov", |
| "suffix": "" |
| }, |
| { |
| "first": "Idan", |
| "middle": [], |
| "last": "Szpektor", |
| "suffix": "" |
| }, |
| { |
| "first": "Ido", |
| "middle": [], |
| "last": "Dagan", |
| "suffix": "" |
| }, |
| { |
| "first": "Alberto", |
| "middle": [], |
| "last": "Lavelli", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of EACL 2006", |
| "volume": "", |
| "issue": "", |
| "pages": "409--416", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lorenza Romano, Milen Kouylekov, Idan Szpektor, Ido Dagan, and Alberto Lavelli. 2006. Investi- gating a generic paraphrase-based approach for re- lation extraction. In Proceedings of EACL 2006, pages 409-416.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Using a shallow linguistic kernel for drug-drug interaction extraction", |
| "authors": [ |
| { |
| "first": "Isabel", |
| "middle": [], |
| "last": "Segura-Bedmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Paloma", |
| "middle": [], |
| "last": "Mart\u00ednez", |
| "suffix": "" |
| }, |
| { |
| "first": "Cesar", |
| "middle": [], |
| "last": "De Pablo-S\u00e1nchez", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Journal of Biomedical Informatics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Isabel Segura-Bedmar, Paloma Mart\u00ednez, and Cesar de Pablo-S\u00e1nchez. 2011. Using a shallow linguistic kernel for drug-drug interaction extraction. Jour- nal of Biomedical Informatics, In Press, Corrected Proof, Available online, 24 April.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Fast cutting plane training for structural kernels", |
| "authors": [ |
| { |
| "first": "Aliaksei", |
| "middle": [], |
| "last": "Severyn", |
| "suffix": "" |
| }, |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Moschitti", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of ECML-PKDD", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aliaksei Severyn and Alessandro Moschitti. 2010. Fast cutting plane training for structural kernels. In Proceedings of ECML-PKDD 2010.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "A Comprehensive Benchmark of Kernel Methods to Extract Protein-Protein Interactions from Literature", |
| "authors": [ |
| { |
| "first": "Domonkos", |
| "middle": [], |
| "last": "Tikk", |
| "suffix": "" |
| }, |
| { |
| "first": "Philippe", |
| "middle": [], |
| "last": "Thomas", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Palaga", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00f6rg", |
| "middle": [], |
| "last": "Hakenberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Ulf", |
| "middle": [], |
| "last": "Leser", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "PLoS Computational Biology", |
| "volume": "6", |
| "issue": "7", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Domonkos Tikk, Philippe Thomas, Peter Palaga, J\u00f6rg Hakenberg, and Ulf Leser. 2010. A Compre- hensive Benchmark of Kernel Methods to Extract Protein-Protein Interactions from Literature. PLoS Computational Biology, 6(7), July.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Discovering relations between named entities from a large raw corpus using tree similarity-based clustering", |
| "authors": [ |
| { |
| "first": "Min", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Danmei", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Guodong", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Chew Lim", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Natural Language Processing -IJCNLP 2005", |
| "volume": "3651", |
| "issue": "", |
| "pages": "378--389", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Min Zhang, Jian Su, Danmei Wang, Guodong Zhou, and Chew Lim Tan. 2005. Discovering relations between named entities from a large raw corpus us- ing tree similarity-based clustering. In Natural Lan- guage Processing -IJCNLP 2005, volume 3651 of Lecture Notes in Computer Science, pages 378-389.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF1": { |
| "num": null, |
| "content": "<table><tr><td>: Basic statistics of the 5 benchmark PPI cor-</td></tr><tr><td>pora.</td></tr></table>", |
| "type_str": "table", |
| "html": null, |
| "text": "" |
| }, |
| "TABREF3": { |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "html": null, |
| "text": "Results of the proposed TPWF feature based kernel on 5 benchmark PPI corpora before and after adding features collected using dependency patterns, regex patterns, trigger words and negative cues to the walk features. The TPWF kernel is a component of the new hybrid kernel." |
| }, |
| "TABREF6": { |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "html": null, |
| "text": "Comparison of the results on the 5 benchmark PPI corpora. Pos. and Neg. refer to the number of positive and negative relations respectively. The underlined numbers indicate the best results for the corresponding corpus reported by any of the existing state-of-the-art approaches. The results of Bui et al. (2010) on LLL, HPRD50, and IEPA are not reported since they did not use all the positive and negative examples during cross validation." |
| } |
| } |
| } |
| } |