| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T14:52:08.982037Z" |
| }, |
| "title": "Predicting Informativeness Of Semantic Triples", |
| "authors": [ |
| { |
| "first": "Judita", |
| "middle": [], |
| "last": "Preiss", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Salford", |
| "location": { |
| "postCode": "M5 4WT", |
| "settlement": "Salford", |
| "country": "United Kingdom" |
| } |
| }, |
| "email": "j.preiss@salford.ac.uk" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Many automatic semantic relation extraction tools extract subject-predicate-object triples from unstructured text. However, a large quantity of these triples merely represent background knowledge. We explore using full texts of biomedical publications to create a training corpus of informative and important semantic triples based on the notion that the main contributions of an article are summarized in its abstract. This corpus is used to train a deep learning classifier to identify important triples, and we suggest that an importance ranking for semantic triples could also be generated.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Many automatic semantic relation extraction tools extract subject-predicate-object triples from unstructured text. However, a large quantity of these triples merely represent background knowledge. We explore using full texts of biomedical publications to create a training corpus of informative and important semantic triples based on the notion that the main contributions of an article are summarized in its abstract. This corpus is used to train a deep learning classifier to identify important triples, and we suggest that an importance ranking for semantic triples could also be generated.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Subject-predicate-object triples are used in numerous natural language processing areas, including question answering (e.g. Hristovski et al. (2015) ), ontology building (e.g. Du and Li (2020) ) and literature based discovery (e.g. Hristovski et al. (2006) ). While they can be thought of as representing the minimum unit of semantic expression, there is a large degree of variability in the amount of new (not commonly known) content they convey. On the one hand, they sometimes represent what can be termed background knowledge, for example \"New Zealand -ISA -country\" or \"pharmaceutical services -TREATS -health personnel\", while on the other, they may describe very specific findings such as pimobendan TREATS hypertrophic cardiomyopathy or LCN2 protein, human -ASSO-CIATED WITH -chronic kidney disease. We use biomedical publications to test the hypothesis that training data consisting of such, important, triples can be created from abstracts, and train a deep learning algorithm to identify these high importance triples from a list of all triples appearing in a paper. The system could also be adjusted to output a weight instead of a binary decision, allowing for an importance ranking of semantic triples within an article.", |
| "cite_spans": [ |
| { |
| "start": 124, |
| "end": 148, |
| "text": "Hristovski et al. (2015)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 176, |
| "end": 192, |
| "text": "Du and Li (2020)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 232, |
| "end": 256, |
| "text": "Hristovski et al. (2006)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The paper begins with an overview of related work in Section 2, the experimental set-up follows in Section 3, with the results and discussion in Section 4 and conclusions drawn in Section 5.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "A number of tools for automatically extracting semantic relations -(subject, relation, object) triples -from unstructured text exist (Yuan and Yu, 2018) . However, as Papadopoulos et al. (2020) point out, the majority of works incorporating these do not perform much pre-or post-processing and therefore include many potentially uninformative triples, and works proposing to extend currently existing collections of semantic relations often speak of extending the set of relations, not refining the relations present (e.g. Koroleva et al. (2020) ).", |
| "cite_spans": [ |
| { |
| "start": 133, |
| "end": 152, |
| "text": "(Yuan and Yu, 2018)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 167, |
| "end": 193, |
| "text": "Papadopoulos et al. (2020)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 523, |
| "end": 545, |
| "text": "Koroleva et al. (2020)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Evaluations of semantic relation extraction systems are often very comprehensive, e.g. Kilicoglu et al. (2020) present a detailed independent evaluation of SemRep -a biomedical domain tuned triple extraction tool -and discover common sources of error for this tool, but such evaluations do not quantify the quality of the triple that is retrieved by the system. It is unclear whether the incorrectly extracted triples are uninformative, or the opposite.", |
| "cite_spans": [ |
| { |
| "start": 87, |
| "end": 110, |
| "text": "Kilicoglu et al. (2020)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "While not phrased as focusing on informative / important triples, existing works often restrict to particular types of relations: Yuan and Yu (2018) evaluate the extraction of health claims, defined as a relation between something that is being manipulated and something that is being measured (e.g. the relation between a substance and a disease). Yadav et al. (2020) restrict to drug-drug interaction, protein-protein interaction, and medical concept relation extraction, while Hope et al. (2021) focus on mechanisms, i.e. activities, functions and causal relations. Such restrictions are likely to increase the overall quality of the remaining triples: removing the ISA relation alone eliminates a large quantity of background knowledge. The closest to our work is due to Zhang et al. (2021) who filter out uninformative triples computationally, based on the difference between triples' expected and observed frequencies.", |
| "cite_spans": [ |
| { |
| "start": 130, |
| "end": 148, |
| "text": "Yuan and Yu (2018)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 480, |
| "end": 498, |
| "text": "Hope et al. (2021)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 775, |
| "end": 794, |
| "text": "Zhang et al. (2021)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Below, we discuss the two steps needed to explore the hypothesis that a dataset based on abstracts can be used to detect important triples using machine learning: 1) creation of a training corpus, and 2) selection of a deep learning architecture.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment Design", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The CORD-19 dataset (Wang et al., 2020) was chosen for this work due to: 1) scale, the 2021-05-03 version contains 198,347 full articles, 2) availability of extracted text, the dataset contains the text extracted from available full article PDFs, 3) domain, the restricted nature of the dataset allows the application of existing biomedical tools.", |
| "cite_spans": [ |
| { |
| "start": 20, |
| "end": 39, |
| "text": "(Wang et al., 2020)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Corpus Creation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Subject-relation-object triples are extracted from all article texts present in the dataset using Sem-Rep (Rindflesch and Fiszman, 2003) . Designed for the biomedical domain, the tool extracts triples such as \"imatinib TREATS Gastrointestinal Stromal Tumors\" but with concepts mapped to Unified Medical Language System metathesaurus (UMLS) (Bodenreider, 2004) concept unique identifiers, CUIs (i.e. yielding C0935989 -TREATS -C0238198 for the example). This addresses the problem of multi-word identification (recognizing gastrointestinal stomal tumours rather than merely tumours) and word sense disambiguation (distinguishing between occurrences of concepts with multiple meanings, such as COLD, which could -among other options -represent the common cold or chronic obstructive airway disease).", |
| "cite_spans": [ |
| { |
| "start": 98, |
| "end": 136, |
| "text": "Sem-Rep (Rindflesch and Fiszman, 2003)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic Relation Extraction", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "To train a machine learning classifier, a training set of important triples is needed. Since an abstract usually summarizes the main findings of an article, we hypothesize that important triples can be considered to be those that appear in both the body and an abstract. It is important to note that the training set of important triples does not need to be complete, i.e. not every important triple from the body needs to be identified. The dataset should be as noise free as possible, and therefore background knowledge triples (which may appear in both the abstract and the body of an article) should not be included. To reduce noise, the following filtering is performed:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Identifying Important Triples", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "\u2022 Previously published triples. The construction of positive examples in the training set hinges on the identification of important triple(s). If these triples are defined as those which describe the novel contribution(s) of an article, an identical triple (i.e. contribution) should not have appeared in abstracts prior to the current paper. Therefore triples appearing in SemRep processed Medline (V40, released October 2019, i.e. before the CORD-19 dataset), a vast collection of biomedical abstracts (Lozano-K\u00fchne, 2013), are removed from the dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Identifying Important Triples", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "\u2022 Frequent concepts. Some frequent concepts often appear in non important triples, such as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Identifying Important Triples", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "therapeutic procedure TREATS disease malaise PROCESS OF patients lung PART OF homo sapiens", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Identifying Important Triples", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "Since the training set does not need include an annotation for every triple encountered and there is high probability of mis-annotation with triples involving these concepts, triples involving the top 1% of concepts appearing in V40 of SemRep processed Medline are removed. The top 1% includes patients, therapeutic procedure, homo sapiens and other very general terms. Note that this does not mean that the system will be unable to classify triples including these concepts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Identifying Important Triples", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "In some cases, an identical triple is used both in the abstract and the body of an article, however, when repeated, novel contributions of a paper are sometimes rephrased using (near) synonyms. Therefore a measure of triple similarity needs to be defined. Since the triples are of the format subject CU I -predicate word -object CU I , this measure can be defined on each component (subject, predicate, object) separately. Word (CUI) embeddings represent each word (CUI) as a vector which captures information about the contexts it appears in, therefore yielding similar -close -vectors for synonyms. A triple similarity measure can therefore be implemented based on cui2vec (Beam et al., 2019 ) (for subject and object similarity) and GloVe (Pennington et al., 2014) embeddings (for predicate similarity). 1 Similarity between two triples, cui 11 \u2212rel 1 \u2212cui 12 and cui 21 \u2212rel 2 \u2212cui 22 , is then given by the formula cs(c2v(cui 11 ), c2v(cui 21 )) + cs(g(rel 1 ), g(rel 2 )) + cs(c2v(cui 12 ), c2v(cui 22 )) where cs represents the cosine similarity, c2v(x) the cui2vec vector of x and g(x) x's GloVe vector. As the maximum value for cosine similarity is 1, the triple similarity is a decimal between 0 and 3 inclusive, 0 corresponding to complete lack of similarity between triples and 3 an exact match. For each body-triple, a similarity can be computed between it and each abstract-triple in the same article, with the highest becoming the body-triple's similarity value. A threshold can be set on the similarity value to decide which triples are deemed important.", |
| "cite_spans": [ |
| { |
| "start": 675, |
| "end": 693, |
| "text": "(Beam et al., 2019", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 742, |
| "end": 767, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Identifying Important Triples", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "The machine learning component consists of three parts: 1) feature extraction, 2) architecture selection, and 3) experiment settings.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deep Learning Algorithm", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The ability to extract important triples (described in Section 3.1.2) makes it possible to use supervised machine learning approaches to train a classifier. To this end a number of features are extracted for each body-triple.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Extraction", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "Frequency based features: 1) the number of times the triple appeared in the body of the article, and 2) the total number of relations within the body of the publication. UMLS based features: 1) the frequency count of the CUIs in the body triple as extracted from SemRep processed Medline -while the top 1% of CUIs have been discarded, it is believed that CUIs with lower frequencies are more likely to be part of novel contributions, 2) the UMLS source vocabulary of the CUIs -the metathesaurus consists of many different types of biomedical vocabularies and the information pertaining to which one(s) a CUI belongs to can serve to give an overall idea of its category, and 3) the depth of the body triple CUIs within UMLS. For some source vocabularies, a hierarchy is present, allowing the computation of the concept's distance to the root -assuming a concept further away from the root is more likely to be more fine-grained, this feature also investigates whether important triples are more likely to contain more specific CUIs (the shortest path to the root is taken if a concept appears in multiple hierarchies).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Extraction", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "Semantics based features: 1) the relation used, 2) the title of the section the body triple appeared in -since the majority of articles in this collection have relatively rigid structure, this was restricted to the commonly prescribed sections such as introduction, background, methods etc, and is based on the hypothesis that a novel contribution of a work is likely to appear in the discussion and / or conclusion sections, and 3) the rank of the sentence the triple appeared in as ranked by TextRank (Mihalcea and Tarau, 2004) . TextRank is a graph based algorithm, often used in summarization, which can be used to order the sentences in an article according to importance, and therefore we hypothesize that a sentence with a low TextRank (high importance) is more likely to yield an important triple.", |
| "cite_spans": [ |
| { |
| "start": 503, |
| "end": 529, |
| "text": "(Mihalcea and Tarau, 2004)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Extraction", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "After performing one hot encoding of the relation feature, this gives 129 features for the 55,745 triples in the dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Extraction", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "While the similarity value of a body-triple calculated as described in Section 3.1.2 can be predicted directly, initial experiments with regression showed that this is hard to do exactly. The problem was therefore framed as binary classification. In this case, a threshold is set on the similarity value and triples with a value above the threshold are used as positive, important, instances.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Architecture Selection", |
| "sec_num": "3.2.2" |
| }, |
| { |
| "text": "Deep learning model is chosen due to its ability to cope with feature dependencies. The model, implemented using Keras, was designed with fully connected (dense) layers of halving sizes with the final layer of size 1. ReLU was used for all layers except the last, where the sigmoid activation function was employed. The loss function was binary entropy and accuracy was used as the metric when classes weren't extremely imbalanced, F 1 was used otherwise. A number of parameters were tuned: 1) the depth of the model (with halving sizes, thus depth one model has a single dense layer of size int(129/2), depth two model has two dense layers of sizes int(129/2), int(129/4), and so on), 2) the number of epochs, 3) dropout, and 4) whether class weights were used.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Architecture Selection", |
| "sec_num": "3.2.2" |
| }, |
| { |
| "text": "As suggested above -by exploring the use of class weights within the model -the dataset is highly imbalanced with, as expected, the majority of triples not appearing in the abstract. The following methods for addressing this bias were explored:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3.2.3" |
| }, |
| { |
| "text": "\u2022 Using class weights within the deep learning algorithm: this allows more emphasis to be given to the minority class.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3.2.3" |
| }, |
| { |
| "text": "\u2022 Under-sampling: randomly sampling the majority class such that the number of examples used in training corresponds to a pre-decided ratio. The minority and majority class can be made equal (1:1) but other ratios were explored, making the majority class more frequent but not overpowering.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3.2.3" |
| }, |
| { |
| "text": "While all the minority, important, class triples are included in the training set, this does not have to be the case for the majority class. As mentioned above, the triples to include in the minority class are selected by a threshold. However, this can lead to a triple with, say, similarity of 2.5 being included in important triples, while a triple with similarity of 2.499 appearing in the non important triples class. Such small difference may be detrimental to the performance of the machine learning algorithm and a buffer band of similarities between the two classes was also explored. I.e. two thresholds, t 1 and t 2 are set such that t 1 \u2212 t 2 > 0 and all triples with similarity >= t 1 are assigned to the important class while triples with similarity <= t 2 are deemed not important.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3.2.3" |
| }, |
| { |
| "text": "A 5-fold cross validation was performed, and each explored model was trained on (a possibly balance adjusted version of) the training portion giving rise to an accuracy or F-measure on the test portion. This allows an average to be computed and the best model to be determined. The results are presented in Table 1 : the similarity value refers to the threshold from Section 3.1.2 used to determine which triples are considered important, the buffer band -when on -removes the cases close to the similarity value threshold from training as described in Section 3.2.3, and the majority column represents the percentage of the training dataset attributed to the majority class. The final columns present the hyperparameters of the best model for the specific combination and the average accuracy / F-measure.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 307, |
| "end": 314, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results And Discussion", |
| "sec_num": "4" |
| }, |
| { |
| "text": "With under-sampling, the accuracies for similarities >= 2 were all within 2% of the best performance, supporting the hypothesis regarding frequent use of synonyms. To avoid a uniform assignment of the majority class, the F-measure metric (which rewards both precision and recall) is used in models without under-sampling. An F-measure of 1 represents perfect precision and recall, and the highest F-measure achieved is 0.975.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results And Discussion", |
| "sec_num": "4" |
| }, |
| { |
| "text": "SHapley Additive exPlanations (SHAP) (Lundberg et al., 2018) uses ideas from game theory to explain feature contributions to machine learning decisions. Figure 1 depicts the feature contributions on a randomly selected sample of 100 triples for the best model without under-sampling. Each dot represents a single triple, with the intensity (blue \u2192 pink) indicating whether the feature value was low or high. The horizontal position indicates whether the contribution caused the prediction to go up -towards being classified as an important triple -or down. The top three rows show expected results: that high values in the number of relations in the document, very frequently occurring CUIs or relations arising from sentences low in importance ranked by TextRank (giving a high rank) impact the prediction very negatively. Unsurprising positive contributors are: 1) the frequency of the triple in the document: a new contribution may be reiterated in the document, 2) the triple appearing in the conclusion: this often contains a summary of contributions, 3) the triple including the TREATS relation: the filtering ensures this is a new triple and being treatment specific, is likely the focus of the work, 4) the triple appearing in the intro- Contributions are also due to the CUIs' UMLS source vocabulary (indicated by source ). In some cases, these categorize the CUI: for example, AOD (alcohol and other drug thesaurus) and PSY (psychological index terms) are not unexpected. Surprising may be the pair MSHNOR and MSHJPN, representing the Norwegian and Japanese translations of Medical Subject Headings, as they appear to have opposite effect. However, MSHJPN's contribution is very limited, suggesting that its completeness may not match that of MSHNOR.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 153, |
| "end": 161, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results And Discussion", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We have demonstrated that a dataset of semantic triples created from full articles based on similarity between triples in the body of the text and triples in the abstract can be used to train a deep learning classifier to make predictions about a semantic triple's importance. An analysis of feature contributions was also performed.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions And Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "While a direct prediction of the similarity score appeared difficult with the quantity of data available, converting the similarity scores into categorical values may be trainable and would provide the basis of a ranking. Again with greater quantity of data, features based on medical subject headings of each CUI could be beneficial indicated by the success of the UMLS source vocabulary features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions And Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The work undertaken was in the biomedical domain based on a tool tuned for biomedical domain grammatical relation extraction. Porting the approach to another domain, where subject-verbobject triples would need to be extracted using a generic grammatical relation extraction algorithm and some features would require re-engineering, would also form an interesting extension of the work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions And Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "GloVe embeddings were chosen since the predicate words are being compared in isolation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work was partly supported by the ERDF Greater Manchester AI Foundry grant, and I would like to thank Sunil Vadera in particular for his support. Thanks also go to the anonymous reviewers for ensuring clarity of this paper as well as pointing out some very nice avenues for further work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Clinical concept embeddings learned from massive sources of multimodal medical data", |
| "authors": [ |
| { |
| "first": "Andrew", |
| "middle": [ |
| "L" |
| ], |
| "last": "Beam", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Kompa", |
| "suffix": "" |
| }, |
| { |
| "first": "Allen", |
| "middle": [], |
| "last": "Schmaltz", |
| "suffix": "" |
| }, |
| { |
| "first": "Inbar", |
| "middle": [], |
| "last": "Fried", |
| "suffix": "" |
| }, |
| { |
| "first": "Griffin", |
| "middle": [], |
| "last": "Weber", |
| "suffix": "" |
| }, |
| { |
| "first": "Nathan", |
| "middle": [ |
| "P" |
| ], |
| "last": "Palmer", |
| "suffix": "" |
| }, |
| { |
| "first": "Xu", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| }, |
| { |
| "first": "Tianxi", |
| "middle": [], |
| "last": "Cai", |
| "suffix": "" |
| }, |
| { |
| "first": "Isaac", |
| "middle": [ |
| "S" |
| ], |
| "last": "Kohane", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1804.01486" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrew L. Beam, Benjamin Kompa, Allen Schmaltz, Inbar Fried, Griffin Weber, Nathan P. Palmer, Xu Shi, Tianxi Cai, and Isaac S. Kohane. 2019. Clin- ical concept embeddings learned from massive sources of multimodal medical data. arXiv preprint arXiv:1804.01486.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "The unified medical language system (UMLS): integrating biomedical terminology", |
| "authors": [ |
| { |
| "first": "Olivier", |
| "middle": [], |
| "last": "Bodenreider", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Nucleic Acids Research", |
| "volume": "32", |
| "issue": "", |
| "pages": "267--270", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Olivier Bodenreider. 2004. The unified medical lan- guage system (UMLS): integrating biomedical ter- minology. Nucleic Acids Research, 32:D267-D270.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A knowledge graph of combined drug therapies using semantic predications from biomedical literature: Algorithm development", |
| "authors": [ |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaoying", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "JMIR Med Inform", |
| "volume": "8", |
| "issue": "4", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jian Du and Xiaoying Li. 2020. A knowledge graph of combined drug therapies using semantic predica- tions from biomedical literature: Algorithm develop- ment. JMIR Med Inform, 8(4):e18323.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Extracting a knowledge base of mechanisms from COVID-19 papers", |
| "authors": [ |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Hope", |
| "suffix": "" |
| }, |
| { |
| "first": "Aida", |
| "middle": [], |
| "last": "Amini", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Wadden", |
| "suffix": "" |
| }, |
| { |
| "first": "Madeleine", |
| "middle": [], |
| "last": "Van Zuylen", |
| "suffix": "" |
| }, |
| { |
| "first": "Sravanthi", |
| "middle": [], |
| "last": "Parasa", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Horvitz", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [ |
| "S" |
| ], |
| "last": "Weld", |
| "suffix": "" |
| }, |
| { |
| "first": "Roy", |
| "middle": [], |
| "last": "Schwartz", |
| "suffix": "" |
| }, |
| { |
| "first": "Hannaneh", |
| "middle": [], |
| "last": "Hajishirzi", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "4489--4503", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom Hope, Aida Amini, David Wadden, Madeleine van Zuylen, Sravanthi Parasa, Eric Horvitz, Daniel S. Weld, Roy Schwartz, and Hannaneh Hajishirzi. 2021. Extracting a knowledge base of mechanisms from COVID-19 papers. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 4489-4503.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Biomedical question answering using semantic relations", |
| "authors": [ |
| { |
| "first": "Dimitar", |
| "middle": [], |
| "last": "Hristovski", |
| "suffix": "" |
| }, |
| { |
| "first": "Dejan", |
| "middle": [], |
| "last": "Dinevski", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrej", |
| "middle": [], |
| "last": "Kastrin", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Rindflesch", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "BMC Bioinformatics", |
| "volume": "16", |
| "issue": "6", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dimitar Hristovski, Dejan Dinevski, Andrej Kastrin, and Thomas Rindflesch. 2015. Biomedical question answering using semantic relations. BMC Bioinfor- matics, 16(6).", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Exploiting semantic relations for literature-based discovery", |
| "authors": [ |
| { |
| "first": "Dimitar", |
| "middle": [], |
| "last": "Hristovski", |
| "suffix": "" |
| }, |
| { |
| "first": "Carol", |
| "middle": [], |
| "last": "Friedman", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [ |
| "C" |
| ], |
| "last": "Rindflesch", |
| "suffix": "" |
| }, |
| { |
| "first": "Borut", |
| "middle": [], |
| "last": "Peterlin", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 2006 AMIA Annual Symposium", |
| "volume": "", |
| "issue": "", |
| "pages": "349--353", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dimitar Hristovski, Carol Friedman, Thomas C. Rind- flesch, and Borut Peterlin. 2006. Exploiting se- mantic relations for literature-based discovery. In Proceedings of the 2006 AMIA Annual Symposium, pages 349-353.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Broad-coverage biomedical relation extraction with SemRep", |
| "authors": [ |
| { |
| "first": "Halil", |
| "middle": [], |
| "last": "Kilicoglu", |
| "suffix": "" |
| }, |
| { |
| "first": "Graciela", |
| "middle": [], |
| "last": "Rosemblat", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcelo", |
| "middle": [], |
| "last": "Fiszman", |
| "suffix": "" |
| }, |
| { |
| "first": "Dongwook", |
| "middle": [], |
| "last": "Shin", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "BMC bioinformatics", |
| "volume": "21", |
| "issue": "1", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Halil Kilicoglu, Graciela Rosemblat, Marcelo Fisz- man, and Dongwook Shin. 2020. Broad-coverage biomedical relation extraction with semrep. BMC bioinformatics, 21(1):188.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Towards creating a new triple store for literature-based discovery", |
| "authors": [ |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Koroleva", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Anisimova", |
| "suffix": "" |
| }, |
| { |
| "first": "Manuel", |
| "middle": [], |
| "last": "Gil", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Trends and Applications in Knowledge Discovery and Data Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "41--50", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anna Koroleva, Maria Anisimova, and Manuel Gil. 2020. Towards creating a new triple store for literature-based discovery. In Trends and Appli- cations in Knowledge Discovery and Data Mining, pages 41-50.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Explainable machine-learning predictions for the prevention of hypoxaemia during surgery", |
| "authors": [ |
| { |
| "first": "Scott", |
| "middle": [ |
| "M" |
| ], |
| "last": "Lundberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Bala", |
| "middle": [], |
| "last": "Nair", |
| "suffix": "" |
| }, |
| { |
| "first": "Monica", |
| "middle": [ |
| "S" |
| ], |
| "last": "Vavilala", |
| "suffix": "" |
| }, |
| { |
| "first": "Mayumi", |
| "middle": [], |
| "last": "Horibe", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [ |
| "J" |
| ], |
| "last": "Eisses", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Adams", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "E" |
| ], |
| "last": "Liston", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [ |
| "King-Wai" |
| ], |
| "last": "Low", |
| "suffix": "" |
| }, |
| { |
| "first": "Shu-Fang", |
| "middle": [], |
| "last": "Newman", |
| "suffix": "" |
| }, |
| { |
| "first": "Jerry", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Nature Biomedical Engineering", |
| "volume": "2", |
| "issue": "10", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Scott M Lundberg, Bala Nair, Monica S Vavilala, Mayumi Horibe, Michael J Eisses, Trevor Adams, David E Liston, Daniel King-Wai Low, Shu-Fang Newman, Jerry Kim, et al. 2018. Explainable machine-learning predictions for the prevention of hypoxaemia during surgery. Nature Biomedical En- gineering, 2(10):749.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "TextRank: Bringing order into text", |
| "authors": [ |
| { |
| "first": "Rada", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Tarau", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the 2004 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "404--411", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rada Mihalcea and Paul Tarau. 2004. TextRank: Bringing order into text. In Proceedings of the 2004 Conference on Empirical Methods in Natural Lan- guage Processing, pages 404-411.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "A methodology for open information extraction and representation from large scientific corpora: The CORD-19 data exploration use case", |
| "authors": [ |
| { |
| "first": "Dimitris", |
| "middle": [], |
| "last": "Papadopoulos", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikolaos", |
| "middle": [], |
| "last": "Papadakis", |
| "suffix": "" |
| }, |
| { |
| "first": "Antonis", |
| "middle": [], |
| "last": "Litke", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Applied Sciences", |
| "volume": "", |
| "issue": "16", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dimitris Papadopoulos, Nikolaos Papadakis, and An- tonis Litke. 2020. A methodology for open infor- mation extraction and representation from large sci- entific corpora: The CORD-19 data exploration use case. Applied Sciences, 10(16).", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "GloVe: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. GloVe: Global vectors for word representation. In Proceedings of the 2014 Confer- ence on Empirical Methods in Natural Language Processing, pages 1532-1543.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "The interaction of domain knowledge and linguistic structure in natural language processing: interpreting hypernymic propositions in biomedical text", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [ |
| "C" |
| ], |
| "last": "Rindflesch", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcelo", |
| "middle": [], |
| "last": "Fiszman", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Journal of Biomedical Informatics", |
| "volume": "36", |
| "issue": "6", |
| "pages": "462--477", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas C. Rindflesch and Marcelo Fiszman. 2003. The interaction of domain knowledge and linguis- tic structure in natural language processing: inter- preting hypernymic propositions in biomedical text. Journal of Biomedical Informatics, 36(6):462-477.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Relation extraction from biomedical and clinical text: Unified multitask learning framework", |
| "authors": [ |
| { |
| "first": "Shweta", |
| "middle": [], |
| "last": "Yadav", |
| "suffix": "" |
| }, |
| { |
| "first": "Srivastsa", |
| "middle": [], |
| "last": "Ramesh", |
| "suffix": "" |
| }, |
| { |
| "first": "Sriparna", |
| "middle": [], |
| "last": "Saha", |
| "suffix": "" |
| }, |
| { |
| "first": "Asif", |
| "middle": [], |
| "last": "Ekbal", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "IEEE/ACM Trans Comput Biol Bioinform", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shweta Yadav, Srivastsa Ramesh, Sriparna Saha, and Asif Ekbal. 2020. Relation extraction from biomed- ical and clinical text: Unified multitask learning framework. IEEE/ACM Trans Comput Biol Bioin- form.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "An evaluation of information extraction tools for identifying health claims in news headlines", |
| "authors": [ |
| { |
| "first": "Shi", |
| "middle": [], |
| "last": "Yuan", |
| "suffix": "" |
| }, |
| { |
| "first": "Bei", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Workshop Events and Stories in the", |
| "volume": "", |
| "issue": "", |
| "pages": "34--43", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shi Yuan and Bei Yu. 2018. An evaluation of infor- mation extraction tools for identifying health claims in news headlines. In Proceedings of the Workshop Events and Stories in the News 2018, pages 34-43. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Drug repurposing for COVID-19 via knowledge graph completion", |
| "authors": [ |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Dimitar", |
| "middle": [], |
| "last": "Hristovski", |
| "suffix": "" |
| }, |
| { |
| "first": "Dalton", |
| "middle": [], |
| "last": "Schutte", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrej", |
| "middle": [], |
| "last": "Kastrin", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcelo", |
| "middle": [], |
| "last": "Fiszman", |
| "suffix": "" |
| }, |
| { |
| "first": "Halil", |
| "middle": [], |
| "last": "Kilicoglu", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Journal of biomedical informatics", |
| "volume": "115", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rui Zhang, Dimitar Hristovski, Dalton Schutte, An- drej Kastrin, Marcelo Fiszman, and Halil Kilicoglu. 2021. Drug repurposing for COVID-19 via knowl- edge graph completion. Journal of biomedical infor- matics, 115(103696).", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Feature contributions duction, where the novelties of the work are often highlighted. The contribution of a higher depth value is also, as expected, positive.", |
| "uris": null, |
| "num": null, |
| "type_str": "figure" |
| } |
| } |
| } |
| } |