| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T10:23:30.022395Z" |
| }, |
| "title": "UWB@FinTOC-2020 Shared Task: Financial Document Title Detection", |
| "authors": [ |
| { |
| "first": "Tom\u00e1\u0161", |
| "middle": [], |
| "last": "Hercig", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "NTIS - New Technologies for the Information Society", |
| "institution": "University of West Bohemia", |
| "location": { |
| "addrLine": "Technick\u00e1 8", |
| "postCode": "306 14", |
| "settlement": "Plze\u0148", |
| "country": "Czech Republic" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Pavel", |
| "middle": [], |
| "last": "Kr\u00e1l", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of West Bohemia", |
| "location": { |
| "addrLine": "Univerzitn\u00ed 8", |
| "postCode": "306 14", |
| "settlement": "Plze\u0148", |
| "country": "Czech Republic" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper describes our system created for the Financial Document Structure Extraction Shared Task (FinTOC-2020): Title Detection. We rely on the Apache PDFBox library to extract text and all additional information e.g. font type and font size from the financial prospectuses. Our constrained system uses only the provided training data without any additional external resources. Our system is based on the Maximum Entropy classifier and various features including font type and font size. Our system achieves F1 score 81% and #1 place in the French track and F1 score 77% and #2 place among 5 participating teams in the English track.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper describes our system created for the Financial Document Structure Extraction Shared Task (FinTOC-2020): Title Detection. We rely on the Apache PDFBox library to extract text and all additional information e.g. font type and font size from the financial prospectuses. Our constrained system uses only the provided training data without any additional external resources. Our system is based on the Maximum Entropy classifier and various features including font type and font size. Our system achieves F1 score 81% and #1 place in the French track and F1 score 77% and #2 place among 5 participating teams in the English track.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Financial documents are used to report activities, financial situation, investment plans, and operational information to shareholders, investors, and financial markets. These reports are usually created on an annual basis in machine-readable formats often only with minimal structure information. The majority of these prospectuses are published without a table of content (TOC), which is usually needed to help readers navigate within the document.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The goal of the First Financial Document Structure Extraction Shared Task (FinTOC-2019) (Juge et al., 2019) was to analyse the financial prospectuses and automatically extract their structure similarly to Doucet et al. (2013) . The Second Financial Document Structure Extraction Shared Task (FinTOC-2020) (Bentabet et al., 2020) adds French documents and greatly simplifies the data formats at the cost of not providing any text representation of the PDF files.", |
| "cite_spans": [ |
| { |
| "start": 88, |
| "end": 107, |
| "text": "(Juge et al., 2019)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 205, |
| "end": 225, |
| "text": "Doucet et al. (2013)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 305, |
| "end": 328, |
| "text": "(Bentabet et al., 2020)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The goal of FinTOC-2020 shared task is to extract the table of content from the financial prospectuses. Systems participating in this shared task were given a sample collection of financial prospectuses with different levels of structure and different lengths as training data. Data statistics for the title detection subtask are shown in Table 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 339, |
| "end": 346, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Task", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The shared task can be divided into two steps: 1) Title detection classifies given text blocks as titles or non-titles. 2) TOC generation organizes provided headers into a hierarchical table of content.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We participated only in the Title detection subtask for both languages. For additional information (e.g. about TOC generation subtask) see the task description paper (Bentabet et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 166, |
| "end": 189, |
| "text": "(Bentabet et al., 2020)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task", |
| "sec_num": "2" |
| }, |
| { |
| "text": "French English Non-title 65.8k (90.8%) 186.3k (94.9%) Title 6.6k (9.2%) 10.1k (5.1%) PDF 47 52 We approached the title detection subtask as a binary classification task. For all experiments, we use Maximum Entropy classifier with default settings from Brainy machine learning library (Konkol, 2014) .", |
| "cite_spans": [ |
| { |
| "start": 284, |
| "end": 298, |
| "text": "(Konkol, 2014)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Label", |
| "sec_num": null |
| }, |
| { |
| "text": "The provided training collection of documents contains the original documents in PDF format and annotations JSON file with gold labels. The JSON file consists of an array of TOC items representing each title with the following properties: text - text of the title, id - order of occurrence of the title, depth - depth level of the title, and page - page of title occurrence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In Table 1 we can see that title distribution among French and English data differs greatly, however, we don't know if the reason for this is that the documents have a different structure or a different approach to annotation was used.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 10, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We decided to use the Apache PDFBox https://pdfbox.apache.org/ version 2.0.20 to extract text and other metadata from the PDF files and then we use our own algorithm to link the annotations to the extracted text representation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extraction", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We consider each line of text a separate text segment and classify each segment as title or non-title. If there is a change in the font size or type we split the text into two lines. Additional metadata are extracted from the first occurring word of the given line. The metadata include the following features: Is bold, Is italic, Is all caps, Begins with cap, Begins with numbering, Left position, Font size, and Font type. Note that some of these features were difficult to extract as there are more ways to create e.g. bold text in PDF format and the library does not provide a convenient interface to access e.g. vector elements.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extraction", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The first shared task (FinTOC-2019) had some issues with the mapping of the XML text representation to the annotated CSV gold labels representation as reported by (Hercig and Kr\u00e1l, 2019 ).", |
| "cite_spans": [ |
| { |
| "start": 163, |
| "end": 185, |
| "text": "(Hercig and Kr\u00e1l, 2019", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Issues", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The second shared task (FinTOC-2020) removed the XML text representation and simplified the gold labels representation to JSON format, however, some of the problems still remained.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Issues", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We did not get any annotation guidelines or explanation of some labels. It seems that the annotation process was incoherent - leaving us with different levels of depth and various parts of the title included or left out depending probably on the annotator of the current file or title.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Issues", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We wrote an algorithm that tries to find the best mapping on a given page assuming the annotated text from the JSON training file appears in the same order of occurrence as the text extracted from the PDF file. Unfortunately, that is not always true, thus we decided to modify the training JSON files and fix the issues, described in the following sections, which caused our algorithm to fail. We manually fixed only the necessary part of the dataset in order for our algorithm to work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Issues", |
| "sec_num": "5" |
| }, |
| { |
| "text": "When we found a typo in the id or the page parameters in the JSON file we corrected the value according to the original PDF.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Wrong Parameter", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "In some cases, the beginning of annotated text from the JSON file was missing. We fixed the occurrences our algorithm discovered. See the example below.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Missing Text Beginning", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "original: SUBSCRIPTIONS ... fixed: (5) SUBSCRIPTIONS ...", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Missing Text Beginning", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "The JSON file contained wrong text transcription (e.g. additional spaces) that caused our mapping algorithm to fail on the given page because no match for the text was found. We corrected the text according to the original PDF.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Wrong Text Transcription", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "We tried to create the best feature set using all the extracted meta-information. The following features proved useful and were used in our submissions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 Character n-grams (ChN n ): Separate feature for each n-gram representing the n-gram presence in the text. We do it separately for different orders n \u2208 {1, 2} and remove n-gram with frequency f \u2264 2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 Binary Features (B): We use separate binary feature for the following text characteristics (Is bold, Is italic, Is all caps, Begins with cap, Begins with numbering, Is next line empty, Is prev line empty).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 Position Features (P): We use four separate binary features to represent the difference in the left position of the text for two sentences. The positions can be equal, lower, greater, and missing. We compare sentence at position p with sentence at position p \u2212 2, p \u2212 1, and p + 1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 First Orto-characters (FO): Bag of first three orthographic 1 characters with at least 2 occurrences.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 Last Orto-characters (LO): Bag of last three orthographic 1 characters with at least 2 occurrences.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 Font Size (FS): We map the font size of text into a one-hot vector with length twelve and use this vector as features for the classifier. The frequency belongs to one of twelve equal-frequency bins 2 . Each bin corresponds to a position in the vector. We remove font sizes with frequency \u2264 2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 Font Size Diff (FSD): We use four separate binary features to represent the difference in font size (FS) of the text for two sentences. The positions can be equal, lower, greater, and missing. We compare sentence at position p with sentence at position p \u2212 1 and p + 1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 Font Type Diff (FTD): We use three separate binary features to represent the difference in font type for two sentences. The font type can be equal, different, and missing. We compare sentence at position p with sentence at position p \u2212 1 and p + 1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 Font Type Unigrams (FTU): We tokenize font type name and use the presence of unigrams as a feature; we remove unigrams with frequency f \u2264 1000.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "6" |
| }, |
| { |
| "text": "The results in Table 4 show our ranking in the FinTOC-2020 shared task using the original test dataset. Our submissions and the fixed train datasets for both English and French are available for research purposes at https://gitlab.com/tigi.cz/fintoc-2020. We performed ablation experiments to illustrate which features are the most beneficial (see Table 3 ). Numbers represent the performance change when the given feature is removed (i.e. lower number means better feature). We used approximately 30% of the fixed training dataset 3 for evaluation (test) and we used the rest of the dataset for training the features (see Table 2 ). We also repeated the experiment using leave-one-out cross-validation as the previous experiment seemed inaccurate. Our evaluation measure is macro-averaged F1-score.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 15, |
| "end": 22, |
| "text": "Table 4", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 348, |
| "end": 355, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 623, |
| "end": 630, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "7" |
| }, |
| { |
| "text": "We can see that the experiments are inconclusive as some of the findings are in contradiction. The most helpful features in terms of leave-one-out cross-validation apart from character bi-grams include both first and last orto-characters and font type unigrams. Last orto-characters, FSD and FTD were always beneficial. On the contrary position features and font size features were the least helpful features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "7" |
| }, |
| { |
| "text": "We believe that the reason that binary features were not very successful (except in French test setting) is that the extraction of these features was not accurate as mentioned in Section 4. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "7" |
| }, |
| { |
| "text": "In this paper, we described our UWB system participating in the FinTOC 2020 shared task. Our best results have been achieved by the Maximum Entropy classifier combining available metadata, such as font type and font size, by careful feature engineering. Our system is ranked #1 in the French track and #2 among 5 participating teams in the English track.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "All lower cased letters were replaced by \"a\", upper cased letters by \"A\" and digits by \"1\" (e.g. \"Char3\" = \"Aaaa1\").2 The frequencies from the training data are split into twelve equal-size bins according to corresponding quantile.3 We used all annotations for ten and twelve JSON files for French and English respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work has been partly supported by Cross-border Cooperation Program Czech Republic -Free State of Bavaria ETS Objective 2014-2020 (project no. 211).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "The Financial Document Structure Extraction Shared task", |
| "authors": [ |
| { |
| "first": "Najah-Imane", |
| "middle": [], |
| "last": "Bentabet", |
| "suffix": "" |
| }, |
| { |
| "first": "R\u00e9mi", |
| "middle": [], |
| "last": "Juge", |
| "suffix": "" |
| }, |
| { |
| "first": "Ismail", |
| "middle": [ |
| "El" |
| ], |
| "last": "Maarouf", |
| "suffix": "" |
| }, |
| { |
| "first": "Virginie", |
| "middle": [], |
| "last": "Mouilleron", |
| "suffix": "" |
| }, |
| { |
| "first": "Dialekti", |
| "middle": [], |
| "last": "Valsamou-Stanislawski", |
| "suffix": "" |
| }, |
| { |
| "first": "Mahmoud", |
| "middle": [], |
| "last": "El-Haj", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "The 1st Joint Workshop on Financial Narrative Processing and MultiLing Financial Summarisation (FNP-FNS 2020)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Najah-Imane Bentabet, R\u00e9mi Juge, Ismail El Maarouf, Virginie Mouilleron, Dialekti Valsamou-Stanislawski, and Mahmoud El-Haj. 2020. The Financial Document Structure Extraction Shared task (FinToc 2020). In The 1st Joint Workshop on Financial Narrative Processing and MultiLing Financial Summarisation (FNP-FNS 2020), Barcelona, Spain.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "ICDAR 2013 Competition on Book Structure Extraction", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Doucet", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Kazai", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Colutto", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "M\u00fchlberger", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "2013 12th International Conference on Document Analysis and Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "1438--1443", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. Doucet, G. Kazai, S. Colutto, and G. M\u00fchlberger. 2013. ICDAR 2013 Competition on Book Structure Extraction. In 2013 12th International Conference on Document Analysis and Recognition, pages 1438-1443, Aug.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "UWB@FinTOC-2019 shared task: Financial document title detection", |
| "authors": [ |
| { |
| "first": "Tom\u00e1\u0161", |
| "middle": [], |
| "last": "Hercig", |
| "suffix": "" |
| }, |
| { |
| "first": "Pavel", |
| "middle": [], |
| "last": "Kr\u00e1l", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Second Financial Narrative Processing Workshop (FNP 2019)", |
| "volume": "", |
| "issue": "", |
| "pages": "74--78", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom\u00e1\u0161 Hercig and Pavel Kr\u00e1l. 2019. UWB@FinTOC-2019 shared task: Financial document title detection. In Proceedings of the Second Financial Narrative Processing Workshop (FNP 2019), pages 74-78, Turku, Finland, September. Link\u00f6ping University Electronic Press.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "The FinTOC-2019 Shared Task: Financial Document Structure Extraction", |
| "authors": [ |
| { |
| "first": "R\u00e9mi", |
| "middle": [], |
| "last": "Juge", |
| "suffix": "" |
| }, |
| { |
| "first": "Najah-Imane", |
| "middle": [], |
| "last": "Bentabet", |
| "suffix": "" |
| }, |
| { |
| "first": "Sira", |
| "middle": [], |
| "last": "Ferradans", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "The Second Workshop on Financial Narrative Processing of NoDalida", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R\u00e9mi Juge, Najah-Imane Bentabet, and Sira Ferradans. 2019. The FinTOC-2019 Shared Task: Financial Document Structure Extraction. In The Second Workshop on Financial Narrative Processing of NoDalida 2019.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Brainy: A Machine Learning Library", |
| "authors": [ |
| { |
| "first": "Michal", |
| "middle": [], |
| "last": "Konkol", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Artificial Intelligence and Soft Computing", |
| "volume": "8468", |
| "issue": "", |
| "pages": "490--499", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michal Konkol. 2014. Brainy: A Machine Learning Library. In Leszek Rutkowski, Marcin Korytkowski, Rafal Scherer, Ryszard Tadeusiewicz, Lotfi Zadeh, and Jacek Zurada, editors, Artificial Intelligence and Soft Computing, volume 8468 of Lecture Notes in Computer Science, pages 490-499. Springer International Publishing.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF0": { |
| "type_str": "table", |
| "content": "<table><tr><td>This work is licensed under a Creative Commons Attribution 4.0 International License. License details: http://</td></tr><tr><td>creativecommons.org/licenses/by/4.0/.</td></tr></table>", |
| "num": null, |
| "text": "Data statistics for Title detection.", |
| "html": null |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "content": "<table><tr><td>Language</td><td colspan=\"2\">French</td><td colspan=\"2\">English</td></tr><tr><td>Label</td><td>Test*</td><td>Train*</td><td>Test*</td><td>Train*</td></tr><tr><td>Title</td><td>1.7k (25.5%)</td><td>5.0k (74.5%)</td><td>3.4k (33.6%)</td><td>6.7k (66.4%)</td></tr><tr><td>Sum</td><td colspan=\"4\">22.7k (31.3%) 49.8k (68.7%) 60.6k (30.9%) 135.7k (69.1%)</td></tr><tr><td>PDF</td><td>10 (21.3%)</td><td>37 (78.7%)</td><td>12 (23.1%)</td><td>40 (76.9%)</td></tr></table>", |
| "num": null, |
| "text": "Non-title 21.0k (31.9%) 44.8k (68.1%) 57.2k (30.7%) 129.0k (69.3%)", |
| "html": null |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "content": "<table><tr><td colspan=\"2\">Feature F1-test</td><td>F1-leave-one-out</td><td colspan=\"2\">Feature F1-test</td><td>F1-leave-one-out</td></tr><tr><td>ALL *</td><td>89.15%</td><td>96.19%</td><td>ALL *</td><td>87.74%</td><td>95.88%</td></tr><tr><td>ChN 1</td><td>-1.11%</td><td>-0.08%</td><td>ChN 1</td><td>0.70%</td><td>-0.09%</td></tr><tr><td>ChN 2</td><td>0.54%</td><td>-3.54%</td><td>ChN 2</td><td>0.69%</td><td>-2.59%</td></tr><tr><td>B</td><td>-1.67%</td><td>0.00%</td><td>B</td><td>1.51%</td><td>0.00%</td></tr><tr><td>P</td><td>-0.19%</td><td>0.02%</td><td>P</td><td>0.38%</td><td>0.06%</td></tr><tr><td>FO</td><td>-1.18%</td><td>-1.18%</td><td>FO</td><td>0.84%</td><td>-0.67%</td></tr><tr><td>LO</td><td>-1.50%</td><td>-0.90%</td><td>LO</td><td>-0.78%</td><td>-0.97%</td></tr><tr><td>FS</td><td>0.14%</td><td>-0.24%</td><td>FS</td><td>1.68%</td><td>0.00%</td></tr><tr><td>FSD</td><td>-0.61%</td><td>-0.11%</td><td>FSD</td><td>-1.11%</td><td>-0.19%</td></tr><tr><td>FTD</td><td>-0.64%</td><td>-0.24%</td><td>FTD</td><td>-0.60%</td><td>-0.03%</td></tr><tr><td>FTU</td><td>1.03%</td><td>-0.47%</td><td>FTU</td><td>-3.29%</td><td>-0.60%</td></tr><tr><td colspan=\"3\">(a) French</td><td/><td/><td/></tr></table>", |
| "num": null, |
| "text": "Dataset split for experiments. Detailed statistical analysis into the datasets and gold labels for the test set would be needed in order to infer further, more accurate, insights. Using all features in the ablation study. Using all features in the ablation study.", |
| "html": null |
| }, |
| "TABREF3": { |
| "type_str": "table", |
| "content": "<table><tr><td/><td/><td/><td>Team</td><td colspan=\"2\">Submission F1</td></tr><tr><td>Team</td><td colspan=\"2\">Submission F1</td><td>Amex</td><td>1</td><td>79%</td></tr><tr><td>UWB</td><td>1</td><td>81%</td><td>Amex</td><td>2</td><td>79%</td></tr><tr><td>taxy.io</td><td>1</td><td>69%</td><td>UWB</td><td>1</td><td>77%</td></tr><tr><td>Daniel</td><td>1</td><td>66%</td><td>Daniel</td><td>1</td><td>69%</td></tr><tr><td>DNLP</td><td>1</td><td>64%</td><td>Daniel</td><td>3</td><td>63%</td></tr><tr><td>Daniel</td><td>2</td><td>64%</td><td>Daniel</td><td>2</td><td>62%</td></tr><tr><td>Daniel</td><td>3</td><td>64%</td><td>DNLP</td><td>1</td><td>59%</td></tr><tr><td/><td>(a) French</td><td/><td>taxy.io</td><td>1</td><td>55%</td></tr><tr><td/><td/><td/><td/><td>(b) English</td><td/></tr></table>", |
| "num": null, |
| "text": "Feature ablation study.", |
| "html": null |
| }, |
| "TABREF4": { |
| "type_str": "table", |
| "content": "<table/>", |
| "num": null, |
| "text": "Results for Title detection.", |
| "html": null |
| } |
| } |
| } |
| } |