| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:36:25.731338Z" |
| }, |
| "title": "Reconstructing Manual Information Extraction with DB-to-Document Backprojection: Experiments in the Life Science Domain", |
| "authors": [ |
| { |
| "first": "Mark-Christoph", |
| "middle": [], |
| "last": "M\u00fcller", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Heidelberg Institute for Theoretical Studies gGmbH", |
| "location": { |
| "settlement": "Heidelberg", |
| "country": "Germany" |
| } |
| }, |
| "email": "mark-christoph.mueller@h-its.org" |
| }, |
| { |
| "first": "Sucheta", |
| "middle": [], |
| "last": "Ghosh", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Heidelberg Institute for Theoretical Studies gGmbH", |
| "location": { |
| "settlement": "Heidelberg", |
| "country": "Germany" |
| } |
| }, |
| "email": "sucheta.ghosh@h-its.org" |
| }, |
| { |
| "first": "Maja", |
| "middle": [], |
| "last": "Rey", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Heidelberg Institute for Theoretical Studies gGmbH", |
| "location": { |
| "settlement": "Heidelberg", |
| "country": "Germany" |
| } |
| }, |
| "email": "maja.rey@h-its.org" |
| }, |
| { |
| "first": "Ulrike", |
| "middle": [], |
| "last": "Wittig", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Heidelberg Institute for Theoretical Studies gGmbH", |
| "location": { |
| "settlement": "Heidelberg", |
| "country": "Germany" |
| } |
| }, |
| "email": "ulrike.wittig@h-its.org" |
| }, |
| { |
| "first": "Wolfgang", |
| "middle": [], |
| "last": "M\u00fcller", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Heidelberg Institute for Theoretical Studies gGmbH", |
| "location": { |
| "settlement": "Heidelberg", |
| "country": "Germany" |
| } |
| }, |
| "email": "wolfgang.mueller@h-its.org" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Strube", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Heidelberg Institute for Theoretical Studies gGmbH", |
| "location": { |
| "settlement": "Heidelberg", |
| "country": "Germany" |
| } |
| }, |
| "email": "michael.strube@h-its.org" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We introduce a novel scientific document processing task for making previously inaccessible information in printed paper documents available to automatic processing. We describe our data set of scanned documents and data records from the biological database SABIO-RK, provide a definition of the task, and report findings from preliminary experiments. Rigorous evaluation proved challenging due to lack of gold-standard data and a difficult notion of correctness. Qualitative inspection of results, however, showed the feasibility and usefulness of the task.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We introduce a novel scientific document processing task for making previously inaccessible information in printed paper documents available to automatic processing. We describe our data set of scanned documents and data records from the biological database SABIO-RK, provide a definition of the task, and report findings from preliminary experiments. Rigorous evaluation proved challenging due to lack of gold-standard data and a difficult notion of correctness. Qualitative inspection of results, however, showed the feasibility and usefulness of the task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Research results from the life sciences are mainly published in the form of written journal or conference papers, even though these results often take the form of measurements of experimental parameters, which would more appropriately be stored in a structured, machine-readable form. While there is some tendency towards directly publishing experimental data, e.g. on SourceData (Liechti et al., 2016) or (for environmental data) PANGAEA 1 , this is not the norm yet, and does not help with the huge body of data already published in the conventional literature. It is common practice in the life sciences, therefore, to manually extract information (including measurements and the experimental conditions underlying them) from natural language documents, and to use it to populate biological databases. This process is called biocuration (International Society for Biocuration, 2018) and comprises, for every document, 1) identification and mark-up of curatable information, 2) data extraction, normalization, and consolidation, and 3) database insertion. Despite constant improvements in NLP technology, biocuration involves significant human labor (mostly reading) (Oughtred 1 www.pangaea.de Huang et al., 2020; Wu et al., 2020; Abdelhakim et al., 2020) , because data quality (i.e. correctness and integrity) has priority over quantity (i.e. more quickly available, but potentially less reliable, data), and the error rates of current NLP systems are still considered too high (Karp, 2016) . For reasons of ergonomics and ease of handling (Buchanan and Loizides, 2007; K\u00f6pper et al., 2016; Clinton, 2019) , the identification and mark-up step often involves paper printouts and highlighter pens, 2 like in the example page in Figure 1 . As mere intermediate products of the curation process, the manually highlighted printouts are only required until all data from the respective document has been curated, and they will normally be archived afterwards. We argue, however, that the printouts contain even more information which curation simply does not make full use of: First, some document sections, although containing highlighting, will not lead to the creation of a record in the biological database (see our results in Section 5.3). Yet, this highlighting can still be regarded as a kind of relevance annotation, produced by life science domain experts through attentive, task-oriented reading. Obviously, this information should be useful, e.g. for the analysis of how important information is dispersed over a scientific document. Second, for those database records that are created from highlighted document sections, the reference to that section is normally not preserved. Again, an obvious way to use this information is to allow users of the biological database to visually trace the record to its source in the document, including the original context. In this paper, we describe our approach towards re-purposing scientific document printouts which were manually highlighted during biocuration. More precisely, our research question is: Given records of curated information from the database and the original, scanned source document, (to what degree) can we recover the document section that a particular record was extracted from?", |
| "cite_spans": [ |
| { |
| "start": 380, |
| "end": 402, |
| "text": "(Liechti et al., 2016)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 1169, |
| "end": 1180, |
| "text": "(Oughtred 1", |
| "ref_id": null |
| }, |
| { |
| "start": 1196, |
| "end": 1215, |
| "text": "Huang et al., 2020;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 1216, |
| "end": 1232, |
| "text": "Wu et al., 2020;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 1233, |
| "end": 1257, |
| "text": "Abdelhakim et al., 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 1482, |
| "end": 1494, |
| "text": "(Karp, 2016)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 1544, |
| "end": 1573, |
| "text": "(Buchanan and Loizides, 2007;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 1574, |
| "end": 1594, |
| "text": "K\u00f6pper et al., 2016;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 1595, |
| "end": 1609, |
| "text": "Clinton, 2019)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1731, |
| "end": 1739, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We consider this to be a novel scientific document processing task, and propose to refer to it as DBto-document backprojection. The remainder of the paper is structured as follows. In Section 2 we describe the data basis of our work. Section 3 introduces the highlighted text extraction task, which we consider as self-contained and only loosely linked to the main task. Section 4 deals with the actual DB-to-document backprojection task, provides a precise definition, and describes our processing steps. Section 5 presents some preliminary experiments, results, and error analysis.Initially, this section will also discuss our approach to evaluation. In Section 6 we discuss some related work, and Section 7 contains our conclusions and directions for the future. Note that, although our data is from the life sciences, the task is relevant for all domains where manual information extraction is performed on natural language documents (like e.g. in Lipani et al. (2014) , where information is extracted from IR research papers in the form of machine-readable 'nanopublications').", |
| "cite_spans": [ |
| { |
| "start": 952, |
| "end": 972, |
| "text": "Lipani et al. (2014)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The work in this paper is based on two related data sets, which have been collected in the SABIO-RK Biochemical Reaction Kinetics Database project 3 . SABIO-RK is a curated database containing structured information about biochemical reactions and their corresponding kinetics (Wittig et al., 2017 (Wittig et al., , 2018 . The document data set is an electronic 3 http://sabio.h-its.org/ version of our archive of 6, 000+ manually highlighted printouts of documents from the life science domain, which have been curated in the 10+ years of our database's existence. Over the years, numerous different curators were involved in the manual mark-up. Different highlighter colors were used, sometimes even within the same document (see Figure 1 ). In case of equivalent information appearing repeatedly in the same document, curators generally attempted to be economical and to avoid redundancy by highlighting only the most appropriate appearance, which is often, but not always, the first appearance. While the mark-up was performed in a completely unrestricted manner (cf. below), in the vast majority of cases, highlighting was applied directly to words or lines (cf. Figure 1 ), which greatly helped in extracting the highlighted text (cf. Section 3). In some rare cases, curators selected whole sections by drawing a vertical line at the section's margin. Also, data in tables was sometimes highlighted on the cell level, while in other cases, only the column header, the table header, or even the table caption was highlighted. We created an electronic version of the document collection by scanning and OCR-processing all papers 4 , which resulted in a sandwich PDF for each document with the (partially highlighted) background superimposed with the extracted text. OCR was performed with commercial software (Alaris Capture Pro), which was used out-of-the-box. The total number of tokens in the 98 documents is 630, 153, with 6, 430 tokens/document on average. The second data set is the record data set which contains measurements of kinetic parameters that were extracted from individual documents from the document collection in the course of manual curation. Each of the 2, 916 records in this data set is linked to exactly one source document (via its PubMed ID), but no lower-level links (to pages or lines) exist. Each document, in turn, can be linked to an arbitrary number of records (29.76 records/document on average). It has to be noted that the above count of 2, 916 records contains a considerable number of multiple counts. This is true in particular for records of type experimental condition (cf. below), and is due to the fact that often, several measurements are performed under identical experimental conditions. For scoring and evaluation, however, this does not make a differ-ence, because we conflate semantically identical records before analysis. There are two main types of records, experimental condition and parameter. Note that we only consider a subset of all a-v pairs available for each record: Some attributes have unspecific values (e.g. role:'Variable') which are not useful for searching. Also, most attributes have a variant with a normalized value, which does not appear in the text. With the exception of the experimental conditions' buffer attribute, all values are atomic. Therefore, the buffer attribute will be handled differently in the second phase of backprojection (see Section 4.2).", |
| "cite_spans": [ |
| { |
| "start": 277, |
| "end": 297, |
| "text": "(Wittig et al., 2017", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 298, |
| "end": 320, |
| "text": "(Wittig et al., , 2018", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 732, |
| "end": 740, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 1168, |
| "end": 1176, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Highlighted text extraction comprises 1) extracting from each sandwich PDF both the searchable plain text and a background image for every page, 2) detecting highlighted areas in the background images, and 3) mapping the detected image areas to the extracted text. The workflow is shown in Figure 4 . We use both pdftohtml and pdftotext from the Poppler 5 library to extract data from the scanned and OCR-processed sandwich PDF documents from our collection. The only task of pdftohtml is to extract, from each page, a PNG image with the non-textual background, which also includes the color-marked areas. These images were already generated during OCR processing and consist of document pages from which pixels that were detected as belonging to text were removed by inpainting (see 'Page background image' in the lower left part of Figure 4 ). pdftotext, on the other hand, is used to extract the text that was previously recognized by OCR. It produces one XML file for the document, incl. bounding boxes on the token-level. These tokens reflect the original document layout, but come in correct reading order even for multi-column documents. The second step makes use of some simple image processing. As described above (Section 2), document highlighting can come in any color, so searching for areas of any particular color (like e.g. yellow) is not an option. Instead, our algorithm combines the facts that 1) highlighting is always non-grey and 2) shades of grey in the RGB color model are characterized by identical, or at least highly similar, values in the R, G, and B components. 6 We create a binarized version of each page by going over all pixels in a copy of the original image and setting each pixel to 'black' if the difference between the R, G, and B components is above a threshold of 50 (i.e. if the pixel is non-grey), and setting it to 'white' otherwise. The resulting image, then, contains regions with higher and lower density of black pixels (see 'Binarized page background image' in Figure 4 ). In the last step, text tokens are labelled as highlighted if their bounding boxes (from the XML file), when projected to the binarized image, cover an area that is at least 50% black. While this process is very simple, we found it to work surprisingly well, at least for the very frequent cases where the highlighting was applied directly to words or lines, yielding almost perfect extraction accuracy on most of the images we inspected. Of the 630, 153 tokens in our data set, only 39, 071 (6.2%) were detected as containing highlighting.", |
| "cite_spans": [ |
| { |
| "start": 1591, |
| "end": 1592, |
| "text": "6", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 290, |
| "end": 299, |
| "text": "Figure 4", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 835, |
| "end": 843, |
| "text": "Figure 4", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 2009, |
| "end": 2018, |
| "text": "Figure 4", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Highlighted Text Extraction", |
| "sec_num": "3" |
| }, |
| { |
| "text": "4 DB-to-Document Backprojection 4.1 Task Definition DB-to-document backprojection attempts to reconstruct the manual information extraction performed during database curation, by recovering those document sections that the curated database records were extracted from. It works by matching database record values (as strings) to plain text from document sections. More precisely, we define the task as follows: Let D be the document data set, R the record data set, R(d) the set of records that were extracted from document d \u2208 D, and V(r) the set of values belonging to record r \u2208 R(d). Also, let SEC(d, sec size) be the set of document sections of sec size tokens into which document d \u2208 D can be segmented. Then, for every document d \u2208 D, for every section s \u2208 SEC(d, sec size), and for every record r \u2208 R(d), a backprojection score between 0.0 and 1.0 is computed by counting how many of the values in V(r) can be matched to the tokens in s, and normalizing by the total number of values in V(r). The result is a list of < record, section, score > tuples for every document, from which the most plausible backprojections still needs to be selected (cf. Section 5.1).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Highlighted Text Extraction", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The following is worth noting. First and foremost, the above definition reflects the fact that there is no simple notion of a correct backprojection of a database record to a document section, neither in our data sets nor, arguably, in reality. In part, this is because the same (or highly similar) information can appear in more than one section of a document. Second, the value of sec size is important because, by specifying the number of tokens that are considered at the same time, it might penalize records with a comparably large number of values. At the same time, however, an excessively large sec size will undermine the whole endeavour because it will be difficult to locate the actual matched values within the section. Also, with increasing sec size, there is a growing risk of clustering values which are actually completely unrelated, creating spurious backprojection results. Finally, the role of automatically detected highlighting for DB-to-document backprojection is still unclear. Since obtaining this highlighting information was the prime reason for scanning the paper printouts in the first place, a rather strong contribution of this feature is desirable. One obvious role of highlighting is that of a filter for preventing non-highlighted tokens from being potential backprojection targets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Highlighted Text Extraction", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The following two steps are performed for every document d \u2208 D and for every record r \u2208 R(d). In the first step, search term creation, the non-empty values in V(r) are converted into search terms. Initially, one search term list is created for each nonempty v \u2208 V(r). Thus, a record with three values will yield as many search term lists with one term each (an example is given below). These three lists are complementary, i.e. we try to match elements of as many of them as possible in a given document section. In order to improve matching and to capture variations introduced by spelling alternatives and / or OCR errors, we apply the following heuristics, which add alternative search terms to individual search term lists: For numerical values with a decimal point (e.g. '9.5'), we add a term where that character is replaced by a comma. If OCR OPTIMIZE=TRUE: For string values containing the \u00b5 character (e.g. '\u00b5g/ml' or '\u00b5M'), we add a term where that character is replaced by a 'p', which is a common OCR error / substitution. Likewise, for string values containing a lowercase 'm' character at the end (e.g. Km), we add several terms where that character is replaced by 'tn', 'ni', and a combination of commas, which are common OCR errors if the 'm' appears as a subscript. If USE SYNONYMS=TRUE: For string values representing chemical compound names, we consult a look-up table and add synonyms, spelling variants, or abbreviations as alternative terms. For illustration, with OCR OPTIMIZE=TRUE, the record of type parameter from Figure 3 above yields the following list of search term lists, with a range of possible matches from zero to five, corresponding to its number of values. Note the spelling variants for the first and second value. During term matching (cf. below), only one item per search term list needs to match in order for the value (first item in each list) to match.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1540, |
| "end": 1548, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Processing Steps", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "[ ['Km', 'Ktn', 'Kni', 'K,,,'] ", |
| "cite_spans": [ |
| { |
| "start": 2, |
| "end": 30, |
| "text": "['Km', 'Ktn', 'Kni', 'K,,,']", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Processing Steps", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": ", ['\u00b5M', 'pM'], ['123'], ['12'], ['Acetyl-CoA']]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Processing Steps", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "As mentioned in Section 2, the buffer attribute of records of type experimental condition is special because its value is a manually edited, commaseparated string containing several chemical substance names (see Figure 2 ). We split each value string into a list of individual substance names, and add each of these names as an additional search term list for that record. Then, in the second step, the actual term matching is performed in the following way: For each document d \u2208 D, we iterate over all tokens in d (extracted from the XML output of pdftotext, cf. Section 3), all records r \u2208 R(d), and all search term lists created for the respective r in the previous step. Then, we iterate over the terms in each search term list, trying to match each one in turn. Matching is done simply by using regular expressions. If a term can be matched to a token, we collect the matching record's ID and the matched value in the token's matchlist, and move on to the next search term list. This matching process is performed only once, and it is the same regardless of the value of sec size. Next, sections of different sizes are created by moving a window of size sec size over all tokens in d, one token at a time. These sections are the potential targets for backprojection. In our experiments, sec size ranges from 3 to 39, in steps of 3, and the following steps are performed for each value of sec size. If the first token in a potential section has a non-empty match list, a matching result for the entire section is computed in the following way: First, all record IDs with a match anywhere in the section are collected. Then, for each of these records, a section score is computed by counting the distinct matches in the section and normalizing that with the maximum number of possible matches. The restriction to distinct values means that if a term matches more than one token in a section, it is only counted once for each record. Without this restriction, values appearing repeatedly in the same section (like e.g. unit names) would incorrectly boost the scores for the respective records. In most cases, a record will match several sections with different scores, but we only select the top scoring sections for each record. In the end, this results in a mapping of record IDs to the top score for these records and a list of sections with this score. In addition, we introduce the following experimental parameters into the backprojection step: HL ROLE: If HL ROLE=IGNORE, highlighting information is not used, if HL ROLE=ONLY, only highlighted tokens (as determined by highlighted text extraction (Section 3) will be considered for matching. MIN MATCHES: The minimum number of values for a record that need to match in a section in order for that section to be considered. MIN MATCHES < 2 will yield a lot of spurious matches. REQUIRE NUM MATCH: If REQUIRE NUM MATCH=TRUE, at least one of the matched record values in a section must be numeric. This is based on the rationale that numeric values are more distinctive than e.g. matches for parameter or unit names.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 212, |
| "end": 220, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Processing Steps", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "As described in the task definition in the previous Section 4, the result of performing a DB-todocument backprojection run with a given set of parameters on a single document is a mapping of each record in the document to those section(s) that yielded the maximum score for that record (possibly none). While the inspection of this result (including visualisation, cf. below) is straightforward, an actual quantitative evaluation is more difficult. This evaluation would have to include the identification of true and false positives (i.e. records that are backprojected to correct resp. incorrect document sections) and false negatives (i.e. records that were not backprojected even though a document section with a sufficient fraction of the record's values exists). This form of evaluation is out of the scope of the present paper. The most obvious reason is that, at least at present, no annotated gold-level data is available which specifies, for each record, one or more document sections as the correct backprojection target. In addition, it will become clear in what follows that there not even is a simple notion of a correct backprojection.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Note on Evaluation", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We performed a couple of preliminary experiments, at first setting the parameters to HL ROLE= IG-NORE, REQUIRE NUM MATCH= TRUE, and MIN MATCHES= 2. On the level of the individual document, inspection of experimental results is straightforward: Figure 5 contains a heatmap with the result for one document which shows, for each record 7 (rows) and different values of sec size (columns), the maximum score (top of cell) and the number of sections with this score (bottom of cell). Cell values are only displayed if they change from column to column. The row headers contain the ID and the total number of values for each record (i.e. the size of V(r)), which is the maximum number of possible matches. Figure 5 allows to make several observations: First, two records (269787 and 269763) could not be backprojected at all under the applied settings, which is visible in their score being 0.000 throughout the whole range of sec size values. The overall highest score of 0.833 was reached by six records, each of which has six potentially matchable values, in precisely one section. However, for the first, third, and fifth record, the best match was found for sec size=9, while for the fourth and sixth one, sec size had to be as high as 24, and even 30 for the second one. In other words, while the six values of some of the records were found in close proximity to each other, for others, they were scattered over a range of more than twice resp. three times that size. Next, we inspect the effect of one possible way of using automatically detected highlighting information, by re-running the previous experiment with HL ROLE= ONLY, i.e. we require the presence of highlighting for a token to be part of a match. Ideally, this should improve backprojection precision, by eliminating spurious matches. Given the low incidence of highlighting in our data (only 6.2% of all tokens, cf. Section 3), this might drastically reduce the number of records that can be matched at all. What is more, given the unconstrained way in which the highlighting was applied by the curators, care has to be taken that the presence (and, more importantly, the absence) of highlighting is not over-interpreted. Figure 6 displays the result for the same document with HL ROLE= ONLY. Some effects are clearly visible: Two records are no longer backprojected at all. 8 For two other records (163378193 and 269766), the maximum scores are reduced (from 0.833 to 0.500 and 0.333, respectively). In summary, the above discussion shows that the heatmap visualisation provides a reasonable and reasonably compact representation of a complete DB-to-document backprojection result. It allows to identify record-to-section mappings with varying plausibility, on the basis of how widely scattered the values are in the target sections. This makes it useful for the comparison of different results, like the two results with HL ROLE= IGNORE and ONLY. The actual verification and qualitative evaluation and error analysis, however, requires a more detailed approach (cf. Section 5.3).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 244, |
| "end": 252, |
| "text": "Figure 5", |
| "ref_id": null |
| }, |
| { |
| "start": 701, |
| "end": 709, |
| "text": "Figure 5", |
| "ref_id": null |
| }, |
| { |
| "start": 2190, |
| "end": 2198, |
| "text": "Figure 6", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Preliminary Experiments", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "For detailed (error) analysis, records can visually be 'projected' to their automatically detected target sections. Figure 7 shows this for one page each from two documents. These results were created with OCR OPTIMIZE and USE SYNONYMS= TRUE, and HL ROLE= IGNORE. Boxes on the right-hand side show (at the top) the record ID and the matching sec size and score, followed by the section text as recognized by OCR, and all search terms, one search term list (cf. Section 4.2) per row. Unmatched values are given in bold red. The following points are interesting to note. For the first record on the top page, the system failed to identify the 'NaCL' token, which was caused by an OCR error which misread 'NaCL' as 'NaCI'. The bottom three records on the top page exemplify the positive effect of the OCR OPTIMIZATION, since '\u00b5M' was only matched because of the replacement 'pM' (the same is true for several records in the lower page). It is also instructive to see that, by setting HL ROLE= IGNORE, two matches could be found in sections without any highlighting. In the lower page, we see the positive effect of USE SYNONYMS= TRUE in the third and forth record, where the replacement 'NAD' was found ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 116, |
| "end": 124, |
| "text": "Figure 7", |
| "ref_id": "FIGREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Detailed Analysis", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "DB-to-document backprojection is related to several NLP and document processing tasks, but it is quite special in that it combines 1) OCR processing of scanned documents, 2) information extraction, 3) template matching, and 4) strictly string-based (as opposed to semantic) matching. Scanned paper documents are much less often subject of text or information extraction than born-digital documents like PDFs. Robust reading 9 is a common 9 https://rrc.cvc.uab.es/ term under which several approaches are collected. A recent approach in this area is DeepReader (Vishwanath et al., 2018) , which is a document understanding approach which seamlessly integrates lowlevel OCR with recognition of higher-level document structure and, to a certain extent, content. Document Visual Question Answering (Mathew et al., 2020) , on the other hand, analyses scanned documents beyond mere OCR of text content, including manually applied highlighting, for answering questions about the documents' content. On the other hand, information extraction and semantic representation or modelling, especially from publications from the bio domain, is a very active field (Vahdati et al., 2019; Anteghini et al., 2020) . The difference, however, is that in these cases, previously unknown information is extracted, based on criteria that often take the form of templates in which potential slot fillers are defined in terms of semantic types (e.g. ENZYME) and (in the case of numerical values), ranges. In DB-to-document backprojection, in contrast, the expected information is explicitly known, fully specified, and 'only' needs to be located on the string level. Therefore, in contrast to a lot of the related work mentioned above, methods involving semantic similarity (like BioBERT (Lee et al., 2020) ) are not necessarily superior to simple string matching when DB-todocument backprojection is concerned.", |
| "cite_spans": [ |
| { |
| "start": 560, |
| "end": 585, |
| "text": "(Vishwanath et al., 2018)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 794, |
| "end": 815, |
| "text": "(Mathew et al., 2020)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 1149, |
| "end": 1171, |
| "text": "(Vahdati et al., 2019;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 1172, |
| "end": 1195, |
| "text": "Anteghini et al., 2020)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 1763, |
| "end": 1781, |
| "text": "(Lee et al., 2020)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In this paper, we introduced, defined, and performed some preliminary experiments with DBto-document backprojection, a novel scientific document processing task. Our motivation for attempting this task comes from the requirements of a biocuration project, and from our idea to re-purpose previously unused (or rather under-used) data to advance biocuration methods. The focus of this initial paper was mostly on motivation, on a definition of the task and its functional parameters, and on the development of a better understanding of the effects and interactions of these parameters. For the latter, we performed some simple experiments and analysed the results. Rigorous evaluation, however, was not attempted, and, what is more, our results showed that defining what it means for a backprojection to be correct is difficult. While a quantitative evaluation remains difficult, a qualitative inspection of backprojection results clearly showed that the answer to our original research question is a positive one, which is the main result of this paper. Our findings regarding the role of color highlighting for backprojection, on the other hand, are somewhat mixed: While our system is able to detect highlighted tokens with high accuracy, appropriate ways to integrate this information into backprojection still need be be explored much further. The approach of requiring highlighting for matching, while not disproved yet, might be too strict, and alternative strategies will be evaluated in future work, for which our system and data sets provide a valuable basis. Additional future work includes the following: The optimization heuristics against OCR errors, although already shown to be effective in practice, are far from complete, and should be improved by handling additional cases of OCR errors, and other spelling variants. Also, as suggested by one reviewer, XML versions of papers from e.g. PubMed could be used to inform the backprojection task, which might also include automatic correction of OCR errors. Future work will also include the creation of an annotated dataset by inspecting the automatic results and storing correct highlighting in the extracted XML. Ideally, this should be done with the help and feedback of domain experts. Finally, the system will be applied to our full data set of 6, 000+ documents, which will yield a stronger data basis for analysis. Apart from the obvious use cases like quality assurance in biocuration (and other fields where information is manually extracted from documents), and support for users of biological databases, both by means of visualizations, we also envisage several other potential applications for DB-to-document backprojection. These include creation of multimodal training data for page-topology-based document understanding systems like Katti et al. 2018, creation of input for empirical studies on document structure (distribution of information in scientific documents), and others.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions & Future Work", |
| "sec_num": "7" |
| }, |
| { |
| "text": "This is true for our own group, and has been corroborated in 2016 by an informal, unpublished survey among 21 curators from 15+ biological databases. The survey showed that a considerable number of curators rely on paper printouts for close reading and / or highlighting of important information.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "For the experiments reported in this paper, we only use a subset of 98 documents.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://poppler.freedesktop.org", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://en.wikipedia.org/wiki/Grey", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "For semantically identical records, only one, arbitrarily selected ID is provided, because all other records have exactly the same result.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Both are actually false negatives, which were not highlighted but appeared as part of a table, which was highlighted on the title level.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work was done as part of the project Deep-Curate, which is funded by the German Federal Ministry of Education and Research (BMBF) (No. 031L0204) and the Klaus Tschira Foundation, Heidelberg, Germany. We thank the anonymous reviewers for their helpful suggestions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Ddiem: drug database for inborn errors of metabolism", |
| "authors": [ |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Schofield", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Hoehndorf", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Orphanet Journal of Rare Diseases", |
| "volume": "15", |
| "issue": "1", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1186/s13023-020-01428-2" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Schofield, and Robert Hoehndorf. 2020. Ddiem: drug database for inborn errors of metabolism. Or- phanet Journal of Rare Diseases, 15(1):146.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Representing semantified biological assays in the open research knowledge graph", |
| "authors": [ |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Anteghini", |
| "suffix": "" |
| }, |
| { |
| "first": "D'", |
| "middle": [], |
| "last": "Jennifer", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Souza", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "P" |
| ], |
| "last": "V\u00edtor", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marco Anteghini, Jennifer D'Souza, V\u00edtor A. P. Mar- tins dos Santos, and S\u00f6ren Auer. 2020. Representing semantified biological assays in the open research knowledge graph. CoRR, abs/2009.07642.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Fenf: Servicing the mycosubtilin synthetase assembly line in trans", |
| "authors": [ |
| { |
| "first": "Zachary", |
| "middle": [ |
| "D" |
| ], |
| "last": "Aron", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascal", |
| "middle": [ |
| "D" |
| ], |
| "last": "Fortin", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "T" |
| ], |
| "last": "Calderone", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "T" |
| ], |
| "last": "Walsh", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "ChemBioChem", |
| "volume": "8", |
| "issue": "6", |
| "pages": "613--616", |
| "other_ids": { |
| "DOI": [ |
| "10.1002/cbic.200600575" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zachary D. Aron, Pascal D. Fortin, Christopher T. Calderone, and Christopher T. Walsh. 2007. Fenf: Servicing the mycosubtilin synthetase assembly line in trans. ChemBioChem, 8(6):613-616.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Investigating document triage on paper and electronic media", |
| "authors": [ |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Buchanan", |
| "suffix": "" |
| }, |
| { |
| "first": "Fernando", |
| "middle": [], |
| "last": "Loizides", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Research and Advanced Technology for Digital Libraries", |
| "volume": "", |
| "issue": "", |
| "pages": "416--427", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George Buchanan and Fernando Loizides. 2007. Inves- tigating document triage on paper and electronic me- dia. In Research and Advanced Technology for Dig- ital Libraries, pages 416-427, Berlin, Heidelberg. Springer Berlin Heidelberg.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Reading from paper compared to screens: A systematic review and meta-analysis", |
| "authors": [ |
| { |
| "first": "Virginia", |
| "middle": [], |
| "last": "Clinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Journal of Research in Reading", |
| "volume": "42", |
| "issue": "2", |
| "pages": "288--325", |
| "other_ids": { |
| "DOI": [ |
| "10.1111/1467-9817.12269" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Virginia Clinton. 2019. Reading from paper compared to screens: A systematic review and meta-analysis. Journal of Research in Reading, 42(2):288-325.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Svad: A genetic database curates non-ischemic sudden cardiac death-associated variants", |
| "authors": [ |
| { |
| "first": "Wei-Chih", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Hsin-Tzu", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Po-Yuan", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Chi", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Tai-Ming", |
| "middle": [], |
| "last": "Ko", |
| "suffix": "" |
| }, |
| { |
| "first": "Sirjana", |
| "middle": [], |
| "last": "Shrestha", |
| "suffix": "" |
| }, |
| { |
| "first": "Chi-Dung", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Chun-San", |
| "middle": [], |
| "last": "Tai", |
| "suffix": "" |
| }, |
| { |
| "first": "Men-Yee", |
| "middle": [], |
| "last": "Chiew", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu-Pao", |
| "middle": [], |
| "last": "Chou", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu-Feng", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hsien-Da", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "PLOS ONE", |
| "volume": "15", |
| "issue": "8", |
| "pages": "1--14", |
| "other_ids": { |
| "DOI": [ |
| "10.1371/journal.pone.0237731" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei-Chih Huang, Hsin-Tzu Huang, Po-Yuan Chen, Wei-Chi Wang, Tai-Ming Ko, Sirjana Shrestha, Chi- Dung Yang, Chun-San Tai, Men-Yee Chiew, Yu- Pao Chou, Yu-Feng Hu, and Hsien-Da Huang. 2020. Svad: A genetic database curates non-ischemic sud- den cardiac death-associated variants. PLOS ONE, 15(8):1-14.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Biocuration: Distilling data into knowledge", |
| "authors": [], |
| "year": 2018, |
| "venue": "International Society for Biocuration", |
| "volume": "16", |
| "issue": "4", |
| "pages": "1--8", |
| "other_ids": { |
| "DOI": [ |
| "10.1371/journal.pbio.2002846" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "International Society for Biocuration. 2018. Biocura- tion: Distilling data into knowledge. PLOS Biology, 16(4):1-8.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Can we replace curation with information extraction software? Database: The Journal of Biological Databases and Curation", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Karp", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1093/database/baw150" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Karp. 2016. Can we replace curation with infor- mation extraction software? Database: The Journal of Biological Databases and Curation, 2016.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Chargrid: Towards understanding 2D documents", |
| "authors": [ |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Anoop R Katti", |
| "suffix": "" |
| }, |
| { |
| "first": "Cordula", |
| "middle": [], |
| "last": "Reisswig", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Guder", |
| "suffix": "" |
| }, |
| { |
| "first": "Steffen", |
| "middle": [], |
| "last": "Brarda", |
| "suffix": "" |
| }, |
| { |
| "first": "Johannes", |
| "middle": [], |
| "last": "Bickel", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean", |
| "middle": [], |
| "last": "H\u00f6hne", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Baptiste Faddoul", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "4459--4469", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-1476" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anoop R Katti, Christian Reisswig, Cordula Guder, Se- bastian Brarda, Steffen Bickel, Johannes H\u00f6hne, and Jean Baptiste Faddoul. 2018. Chargrid: Towards understanding 2D documents. In Proceedings of the 2018 Conference on Empirical Methods in Nat- ural Language Processing, pages 4459-4469, Brus- sels, Belgium. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Reading from computer screen versus reading from paper: does it still make a difference?", |
| "authors": [ |
| { |
| "first": "Maja", |
| "middle": [], |
| "last": "K\u00f6pper", |
| "suffix": "" |
| }, |
| { |
| "first": "Susanne", |
| "middle": [], |
| "last": "Mayr", |
| "suffix": "" |
| }, |
| { |
| "first": "Axel", |
| "middle": [], |
| "last": "Buchner", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Ergonomics", |
| "volume": "59", |
| "issue": "5", |
| "pages": "615--632", |
| "other_ids": { |
| "DOI": [ |
| "10.1080/00140139.2015.1100757" |
| ], |
| "PMID": [ |
| "26736059" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maja K\u00f6pper, Susanne Mayr, and Axel Buchner. 2016. Reading from computer screen versus reading from paper: does it still make a difference? Ergonomics, 59(5):615-632. PMID: 26736059.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Biobert: a pre-trained biomedical language representation model for biomedical text mining", |
| "authors": [ |
| { |
| "first": "Jinhyuk", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Wonjin", |
| "middle": [], |
| "last": "Yoon", |
| "suffix": "" |
| }, |
| { |
| "first": "Sungdong", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Donghyeon", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Sunkyu", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Chan", |
| "middle": [], |
| "last": "Ho So", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaewoo", |
| "middle": [], |
| "last": "Kang", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Bioinform", |
| "volume": "36", |
| "issue": "4", |
| "pages": "1234--1240", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jinhyuk Lee, Wonjin Yoon, Sungdong Kim, Donghyeon Kim, Sunkyu Kim, Chan Ho So, and Jaewoo Kang. 2020. Biobert: a pre-trained biomedical language representation model for biomedical text mining. Bioinform., 36(4):1234- 1240.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Sourcedata -a semantic platform for curating and searching figures", |
| "authors": [ |
| { |
| "first": "Robin", |
| "middle": [], |
| "last": "Liechti", |
| "suffix": "" |
| }, |
| { |
| "first": "Nancy", |
| "middle": [], |
| "last": "George", |
| "suffix": "" |
| }, |
| { |
| "first": "Sara", |
| "middle": [], |
| "last": "El-Gebali", |
| "suffix": "" |
| }, |
| { |
| "first": "Lou", |
| "middle": [], |
| "last": "G\u00f6tz", |
| "suffix": "" |
| }, |
| { |
| "first": "Isaac", |
| "middle": [], |
| "last": "Crespo", |
| "suffix": "" |
| }, |
| { |
| "first": "Ioannis", |
| "middle": [], |
| "last": "Xenarios", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Lemberger", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Nature Methods", |
| "volume": "14", |
| "issue": "", |
| "pages": "1021--1022", |
| "other_ids": { |
| "DOI": [ |
| "10.1101/058529" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robin Liechti, Nancy George, Sara El-Gebali, Lou G\u00f6tz, Isaac Crespo, Ioannis Xenarios, and Thomas Lemberger. 2016. Sourcedata -a semantic platform for curating and searching figures. Nature Methods, 14:1021-1022.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Extracting nanopublications from IR papers", |
| "authors": [ |
| { |
| "first": "Aldo", |
| "middle": [], |
| "last": "Lipani", |
| "suffix": "" |
| }, |
| { |
| "first": "Florina", |
| "middle": [], |
| "last": "Piroi", |
| "suffix": "" |
| }, |
| { |
| "first": "Linda", |
| "middle": [], |
| "last": "Andersson", |
| "suffix": "" |
| }, |
| { |
| "first": "Allan", |
| "middle": [], |
| "last": "Hanbury", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "IRFC", |
| "volume": "8849", |
| "issue": "", |
| "pages": "53--62", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aldo Lipani, Florina Piroi, Linda Andersson, and Allan Hanbury. 2014. Extracting nanopublications from IR papers. In IRFC, volume 8849 of Lecture Notes in Computer Science, pages 53-62. Springer.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "DocVQA: A dataset for vqa on document images", |
| "authors": [ |
| { |
| "first": "Minesh", |
| "middle": [], |
| "last": "Mathew", |
| "suffix": "" |
| }, |
| { |
| "first": "Dimosthenis", |
| "middle": [], |
| "last": "Karatzas", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Manmatha", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "V" |
| ], |
| "last": "Jawahar", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minesh Mathew, Dimosthenis Karatzas, R. Manmatha, and C. V. Jawahar. 2020. DocVQA: A dataset for vqa on document images.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Sonam Dolma, Andrew Willems, Jasmin Coulombe-Huntington, Andrew Chatr-aryamontri, Kara Dolinski, and Mike Tyers. 2019. The BioGRID interaction database: 2019 update", |
| "authors": [ |
| { |
| "first": "Rose", |
| "middle": [], |
| "last": "Oughtred", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Stark", |
| "suffix": "" |
| }, |
| { |
| "first": "Bobby-Joe", |
| "middle": [], |
| "last": "Breitkreutz", |
| "suffix": "" |
| }, |
| { |
| "first": "Jennifer", |
| "middle": [], |
| "last": "Rust", |
| "suffix": "" |
| }, |
| { |
| "first": "Lorrie", |
| "middle": [], |
| "last": "Boucher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christie", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Nadine", |
| "middle": [], |
| "last": "Kolas", |
| "suffix": "" |
| }, |
| { |
| "first": "O'", |
| "middle": [], |
| "last": "Lara", |
| "suffix": "" |
| }, |
| { |
| "first": "Genie", |
| "middle": [], |
| "last": "Donnell", |
| "suffix": "" |
| }, |
| { |
| "first": "Rochelle", |
| "middle": [], |
| "last": "Leung", |
| "suffix": "" |
| }, |
| { |
| "first": "Frederick", |
| "middle": [], |
| "last": "Mcadam", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Nucleic Acids Research", |
| "volume": "47", |
| "issue": "D1", |
| "pages": "529--541", |
| "other_ids": { |
| "DOI": [ |
| "10.1093/nar/gky1079" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rose Oughtred, Chris Stark, Bobby-Joe Breitkreutz, Jennifer Rust, Lorrie Boucher, Christie Chang, Na- dine Kolas, Lara O'Donnell, Genie Leung, Rochelle McAdam, Frederick Zhang, Sonam Dolma, An- drew Willems, Jasmin Coulombe-Huntington, An- drew Chatr-aryamontri, Kara Dolinski, and Mike Ty- ers. 2019. The BioGRID interaction database: 2019 update. Nucleic Acids Research, 47(D1):D529- D541.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "The use of fluoro-and deoxy-substrate analogs to examine binding specificity and catalysis in the enzymes of the sorbitol pathway", |
| "authors": [ |
| { |
| "first": "Mary", |
| "middle": [ |
| "Ellen" |
| ], |
| "last": "Scott", |
| "suffix": "" |
| }, |
| { |
| "first": "Ronald", |
| "middle": [ |
| "E" |
| ], |
| "last": "Viola", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Carbohydrate Research", |
| "volume": "313", |
| "issue": "3", |
| "pages": "247--253", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/S0008-6215(98)00266-3" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mary Ellen Scott and Ronald E. Viola. 1998. The use of fluoro-and deoxy-substrate analogs to exam- ine binding specificity and catalysis in the enzymes of the sorbitol pathway. Carbohydrate Research, 313(3):247 -253.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Semantic representation of scientific publications", |
| "authors": [ |
| { |
| "first": "Sahar", |
| "middle": [], |
| "last": "Vahdati", |
| "suffix": "" |
| }, |
| { |
| "first": "Said", |
| "middle": [], |
| "last": "Fathalla", |
| "suffix": "" |
| }, |
| { |
| "first": "S\u00f6ren", |
| "middle": [], |
| "last": "Auer", |
| "suffix": "" |
| }, |
| { |
| "first": "Christoph", |
| "middle": [], |
| "last": "Lange", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria-Esther", |
| "middle": [], |
| "last": "Vidal", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "TPDL", |
| "volume": "", |
| "issue": "", |
| "pages": "375--379", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sahar Vahdati, Said Fathalla, S\u00f6ren Auer, Christoph Lange, and Maria-Esther Vidal. 2019. Semantic rep- resentation of scientific publications. In TPDL, vol- ume 11799 of Lecture Notes in Computer Science, pages 375-379. Springer.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Deep reader: Information extraction from document images via relation extraction and natural language", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Vishwanath", |
| "suffix": "" |
| }, |
| { |
| "first": "Rohit", |
| "middle": [], |
| "last": "Rahul", |
| "suffix": "" |
| }, |
| { |
| "first": "Gunjan", |
| "middle": [], |
| "last": "Sehgal", |
| "suffix": "" |
| }, |
| { |
| "first": "Arindam", |
| "middle": [], |
| "last": "Swati", |
| "suffix": "" |
| }, |
| { |
| "first": "Monika", |
| "middle": [], |
| "last": "Chowdhury", |
| "suffix": "" |
| }, |
| { |
| "first": "Lovekesh", |
| "middle": [], |
| "last": "Sharma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vig", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Gautam", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashwin", |
| "middle": [], |
| "last": "Shroff", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Srinivasan", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Computer Vision -ACCV 2018 Workshops -14th Asian Conference on Computer Vision", |
| "volume": "11367", |
| "issue": "", |
| "pages": "186--201", |
| "other_ids": { |
| "DOI": [ |
| "10.1007/978-3-030-21074-8_15" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "D Vishwanath, Rohit Rahul, Gunjan Sehgal, Swati, Arindam Chowdhury, Monika Sharma, Lovekesh Vig, Gautam M. Shroff, and Ashwin Srinivasan. 2018. Deep reader: Information extraction from document images via relation extraction and natural language. In Computer Vision -ACCV 2018 Work- shops -14th Asian Conference on Computer Vision, Perth, Australia, December 2-6, 2018, Revised Se- lected Papers, volume 11367 of Lecture Notes in Computer Science, pages 186-201. Springer.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "SABIO-RK: an updated resource for manually curated biochemical reaction kinetics", |
| "authors": [ |
| { |
| "first": "Ulrike", |
| "middle": [], |
| "last": "Wittig", |
| "suffix": "" |
| }, |
| { |
| "first": "Maja", |
| "middle": [], |
| "last": "Rey", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Weidemann", |
| "suffix": "" |
| }, |
| { |
| "first": "Renate", |
| "middle": [], |
| "last": "Kania", |
| "suffix": "" |
| }, |
| { |
| "first": "Wolfgang", |
| "middle": [], |
| "last": "M\u00fcller", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Nucleic Acids Research", |
| "volume": "46", |
| "issue": "D1", |
| "pages": "656--660", |
| "other_ids": { |
| "DOI": [ |
| "10.1093/nar/gkx1065" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ulrike Wittig, Maja Rey, Andreas Weidemann, Re- nate Kania, and Wolfgang M\u00fcller. 2018. SABIO- RK: an updated resource for manually curated bio- chemical reaction kinetics. Nucleic Acids Research, 46(D1):D656-D660.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Data management and data enrichment for systems biology projects", |
| "authors": [ |
| { |
| "first": "Ulrike", |
| "middle": [], |
| "last": "Wittig", |
| "suffix": "" |
| }, |
| { |
| "first": "Maja", |
| "middle": [], |
| "last": "Rey", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Weidemann", |
| "suffix": "" |
| }, |
| { |
| "first": "Wolfgang", |
| "middle": [], |
| "last": "M\u00fcller", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Journal of biotechnology", |
| "volume": "261", |
| "issue": "", |
| "pages": "229--237", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.jbiotec.2017.06.007" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ulrike Wittig, Maja Rey, Andreas Weidemann, and Wolfgang M\u00fcller. 2017. Data management and data enrichment for systems biology projects. Journal of biotechnology., 261:229-237.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "PncStress: a manually curated database of experimentally validated stress-responsive non-coding RNAs in plants", |
| "authors": [ |
| { |
| "first": "Wenyi", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yan", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Dahui", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yincong", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanshi", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yujie", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1093/database/baaa001" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wenyi Wu, Yan Wu, Dahui Hu, Yincong Zhou, Yanshi Hu, Yujie Chen, and Ming Chen. 2020. PncStress: a manually curated database of experimentally vali- dated stress-responsive non-coding RNAs in plants. Database, 2020. Baaa001.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Page with mark-up (best viewed in color).", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF1": { |
| "text": "Each record consists of three to six attribute-value (a-v) pairs. Figures 2 and 3 show one example of each type of record. Record of type experimental condition with three a-v pairs featuring one numeric, one atomic string, and one complex string value.", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF2": { |
| "text": "Record of type parameter with five a-v pairs, featuring two numeric and three atomic string values.", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF3": { |
| "text": "Highlighted text extraction workflow (best viewed in color).", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF4": { |
| "text": "Single Document-level result, HL ROLE=ONLY instead of the originally required 'NAD+'. Finally, the lower page also shows that highlighting is not necessarily associated with extracted information.", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF5": { |
| "text": "Sample results ofAron et al. (2007) (top) andScott and Viola (1998) (bottom) (best viewed in color).", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| } |
| } |
| } |
| } |