| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T14:46:52.665965Z" |
| }, |
| "title": "Automating Claim Construction in Patent Applications: The CMUmine Dataset", |
| "authors": [ |
| { |
| "first": "Ozan", |
| "middle": [ |
| "K" |
| ], |
| "last": "Tonguz", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Carnegie Mellon University Pittsburgh", |
| "location": { |
| "postCode": "15213-3890", |
| "region": "PA", |
| "country": "USA" |
| } |
| }, |
| "email": "tonguz@andrew.cmu.edu" |
| }, |
| { |
| "first": "Yiwei", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Carnegie Mellon University Pittsburgh", |
| "location": { |
| "postCode": "15213-3890", |
| "region": "PA", |
| "country": "USA" |
| } |
| }, |
| "email": "yiweiq@andrew.cmu.edu" |
| }, |
| { |
| "first": "Yimeng", |
| "middle": [], |
| "last": "Gu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Carnegie Mellon University Pittsburgh", |
| "location": { |
| "postCode": "15213-3890", |
| "region": "PA", |
| "country": "USA" |
| } |
| }, |
| "email": "yimengg@andrew.cmu.edu" |
| }, |
| { |
| "first": "Hannah", |
| "middle": [], |
| "last": "Moon", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Carnegie Mellon University Pittsburgh", |
| "location": { |
| "postCode": "15213-3890", |
| "region": "PA", |
| "country": "USA" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Intellectual Property (IP) in the form of issued patents is a critical and very desirable element of innovation in high-tech. In this position paper, we explore the possibility of automating the legal task of Claim Construction in patent applications via Natural Language Processing (NLP) and Machine Learning (ML). To this end, we first create a large dataset known as CMUmine\u2122and then demonstrate that, using NLP and ML techniques the Claim Construction in patent applications, a crucial legal task currently performed by IP attorneys, can be automated. To the best of our knowledge, this is the first public patent application dataset. Our results look very promising in automating the patent application process.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Intellectual Property (IP) in the form of issued patents is a critical and very desirable element of innovation in high-tech. In this position paper, we explore the possibility of automating the legal task of Claim Construction in patent applications via Natural Language Processing (NLP) and Machine Learning (ML). To this end, we first create a large dataset known as CMUmine\u2122and then demonstrate that, using NLP and ML techniques the Claim Construction in patent applications, a crucial legal task currently performed by IP attorneys, can be automated. To the best of our knowledge, this is the first public patent application dataset. Our results look very promising in automating the patent application process.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "In the USA, European Union (EU), and Asia, most of the high-tech industries (semiconductors, wireless, Internet, Telecommunications, Robotics, Sensors, etc.) are characterized by the innovations they introduce and the inventions they make in a specific area of technology and these innovations are considered to be the intellectual property (IP) of these companies, a very important asset for any high-tech company. To protect their IP, high-tech companies file for patent applications to make it official that they own that specific idea and invention that could involve a new system or apparatus, new method, and new algorithms and/or software.", |
| "cite_spans": [ |
| { |
| "start": 76, |
| "end": 157, |
| "text": "(semiconductors, wireless, Internet, Telecommunications, Robotics, Sensors, etc.)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In general, filing patents that describe new inventions is a lengthy, cumbersome, and very costly process since most high-tech companies have to hire law firms, litigation attorneys who specialize in IP, and technical professionals called patent \"agents\", or \"patent engineers\" for writing and filing such patent applications. After an inventor prepares a well-written document describing his/her invention, she/he submits this document to an IP attorney who takes this document that is known as \"Invention Disclosure\" and prepares a patent application that can be submitted in the US to the United States Patent and Trademark Office (USPTO).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The main sections of a patent comprise the following sections: 1) Title and Inventors 2) Abstract 3) Introduction (background of the invention and \"prior art\") 4) Invention Summary 5) Description of the invention including figures (also known as \"specifications\") 6) Claims (independent claims and dependent claims)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "A key observation we make is that, if the Invention Disclosure is well prepared by the inventor, then the main contribution of the IP attorney to the patent application is to formulate the claims of the specific invention. In legal terms, this task is known as \"Claim Construction\". An IP attorney will not typically change the other parts of an invention (e.g., the specifications section of the invention); instead, he will formulate the main claims of the invention in the form of: (i) Independent Claims (ii) Dependent Claims and then append these claims to the end of the invention disclosure for the official submission of the patent application to the USPTO. Typically, patents in information technology have 3 Independent Claims and 5 or 6 dependent claims per independent claim that do depend on each of the 3 independent claims, thus resulting in a total of 20 or more claims in a patent application. In general, the independent claims concern the following aspects of an invention: 1) System or apparatus claim 2) Method claim 3) Software claim", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this position paper, we explore the possibility of replacing the \"human agents\" with an automated solution (i.e., \"the machine\") in patent application process. In other words, we investigate whether the construction of the independent and dependent claims prepared by an IP attorney can be automated by using AI, Natural Language Processing (NLP), and Machine Learning (ML). The key observation behind this is the underlying pattern in preparing the claims in a patent application: the IP attorney gets a well written Invention Disclosure from an inventor and, based on that and a brief conversation with the inventor, prepares the claims of a patent application. Can this process be automated? Our results suggest that the use of NLP and ML can indeed automate \"Claim Construction\" tasks and, therefore, the patent application process. As a proof-of-concept, among all the claims constructed by IP attorneys, this paper focuses on generating the First Independent Claim. The approach we pursue in this paper is to formulate the problem as a text summarization problem [El-Kassas et al., 2021] .", |
| "cite_spans": [ |
| { |
| "start": 1072, |
| "end": 1096, |
| "text": "[El-Kassas et al., 2021]", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Statement", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Text summarization aims to briefly summarize the key information of any longer input text. Text summarization techniques using NLP has been successfully applied to various fields, including news [Grusky et al., 2018] [Fabbri et al., 2019] , scientific papers [Lu, 2011] [Clement et al., 2019] and patents [Grusky et al., 2020] . Specifically, in [Grusky et al., 2020] , the author models the text summarization task as follows: a granted patent's description is the input text, and its summary is regarded as the gold-standard summary. Our task is similar to other text summarization tasks, i.e., summarizing the First Independent Claim from a longer Invention Disclosure. Therefore, a viable approach that might work is to model our task as a text summarization task.", |
| "cite_spans": [ |
| { |
| "start": 195, |
| "end": 216, |
| "text": "[Grusky et al., 2018]", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 217, |
| "end": 238, |
| "text": "[Fabbri et al., 2019]", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 259, |
| "end": 269, |
| "text": "[Lu, 2011]", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 270, |
| "end": 292, |
| "text": "[Clement et al., 2019]", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 305, |
| "end": 326, |
| "text": "[Grusky et al., 2020]", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 346, |
| "end": 367, |
| "text": "[Grusky et al., 2020]", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "3" |
| }, |
| { |
| "text": "To meet our goal, we started by collecting a large dataset consisting of 300K patent applications, known as Carnegie Mellon University Machine Interpreted Natural-Language Engineering (CMUmine\u2122). Using this very large data set, dubbed CMUmine\u2122, of 317,356 previous US patent applications, we create a training set, a validation data set, and a test set that comprise 253,976, 31,736, and 31, 644 data points (i.e., previous patent applications), respectively. This is the first public and largest patent application dataset to the best of our knowledge. BigPatent [Grusky et al., 2020 ] is a well-known large text summarization dataset in patent domain, but it does not include patent claims and cannot be used for claim construction.", |
| "cite_spans": [ |
| { |
| "start": 564, |
| "end": 584, |
| "text": "[Grusky et al., 2020", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Collection and Dataset Construction", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Our dataset consists of issued patent applications collected from USPTO Bulk Data Storage System (BDSS) Version 1.1.0 1 . Specifically, our raw data comes from the Patent Application Full Text Data (No Images) (MAR 15, 2021 -PRESENT) in the link above. We only used the patent applications issued in 2005 and 2006 to construct our dataset. Based on a detailed literature review on popular text summarization dataset's size, we decided to collect roughly 300,000 data points. Initially, we began to process data from the year 2002 and found that from the year 2002 to year 2004 datasets have non-standardized data structures. When we processed the year 2005 data set, it had a suitable structure for processing the data. It also met our expectations on the amount of data needed to train models after accumulating two years of data in a row. Therefore, we decided to use the year 2005 to 2006 data envisioning that the same arguments can be applied for data collected in recent years as well. Our data set can be found at this link 2 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Description", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The raw data on USPTO BDSS is in the format of XML, e.g., ipa150903.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Processing", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "xml. Each xml file contains all the patent applications issued during that week of a certain year.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Processing", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Considering the fact that patent applications are organized in different ways, to reduce the variation in size of summary and First Independent Claim and build a more representative dataset, we applied filter conditions to remove outliers. We only kept patent applications whose Invention Summary and First Independent Claim's length are within the percentile [10%,90%], that is [150,1500] words for Invention Summary and [35, 300] words for First Independent Claim.", |
| "cite_spans": [ |
| { |
| "start": 422, |
| "end": 426, |
| "text": "[35,", |
| "ref_id": null |
| }, |
| { |
| "start": 427, |
| "end": 431, |
| "text": "300]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Processing", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Our dataset consists of training set, validation set, and test set with a ratio of 8:1:1. Under train/validation/test folder, there are 6 subfolders: 1) abstract; 2) background; 3) summary; 4) detailed description; 5) first independent claim; and 6) claims.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset Structure", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Under each sub-folder, every single file is named in the format of {patent application No.}_{sub-folder name}. All the files only contain text data. It was observed that not every US patent application contains a detailed description section. If a patent application does not have a detailed description section, we do not include it in the detailed description sub-folder. Around 2/3 (two thirds) of patent applications in our dataset have detailed description part.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset Structure", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "To have good insights into the features of our dataset, we use several automatic metrics to quantify its important features (e.g., average length, extractivity, compression, novel words).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset Analysis", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Compression ratio [Grusky et al., 2020] : ratio between source document and output length. Compression ratio is measured by", |
| "cite_spans": [ |
| { |
| "start": 18, |
| "end": 39, |
| "text": "[Grusky et al., 2020]", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset Analysis", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "CM P (S, O) = S O ,", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Dataset Analysis", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "where |S| and |O| denote the length of the source document and output sequence, respectively. Coverage [Grusky et al., 2020] : measures the percentage of words in the output sequence that are part of an extractive fragment in the source document. Coverage is measured by", |
| "cite_spans": [ |
| { |
| "start": 103, |
| "end": 124, |
| "text": "[Grusky et al., 2020]", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset Analysis", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "Coverage(S, O) = 1 O f \u2208F (S,O) |f |,", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Dataset Analysis", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "where F (S, O) is the set of shared sequences of tokens in source S and output sequence O. Density [Grusky et al., 2020] : measures the average length of the extractive fragment. Density is measured by", |
| "cite_spans": [ |
| { |
| "start": 99, |
| "end": 120, |
| "text": "[Grusky et al., 2020]", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset Analysis", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Density(S, O) = 1 |O| f \u2208F (S,O) |f | 2 . (3)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset Analysis", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Copy Length [Chen et al., 2020] : measures the average length of segments in output sequence copied from source document.", |
| "cite_spans": [ |
| { |
| "start": 12, |
| "end": 31, |
| "text": "[Chen et al., 2020]", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset Analysis", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Novelty n-gram ratio [Narayan et al., 2018] : the proportion of segments in the output sequence that haven't appeared in source documents. The segments can be instantiated as n-grams.", |
| "cite_spans": [ |
| { |
| "start": 21, |
| "end": 43, |
| "text": "[Narayan et al., 2018]", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset Analysis", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "As mentioned before, our dataset includes different parts of an invention disclosure document. Since the useful information to generate claims are scattered to different sections of this document, it is important to evaluate the features of different parts to decide which part is the best to use as the input sequence for our model to generate claims. For simplicity, we do not consider the combination of two parts and the detailed description part, whose length exceeds the capacity of both Recurrent Neural Networks (RNN) based model and Transformer [Vaswani et al., 2017] based model. We evaluate the dataset characteristics using Abstract, Introduction, or Invention Summary as source document and First Independent Claims as the output. The dataset evaluation result is shown in Table 1 .", |
| "cite_spans": [ |
| { |
| "start": 554, |
| "end": 576, |
| "text": "[Vaswani et al., 2017]", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 786, |
| "end": 793, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dataset Analysis", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "From Table 1 , we observe that the summary part has the highest extractive rate, which is reflected in the highest density, coverage, copy length and lowest novelty words ratio. This means the summary part is the most informative part for generating claims, so we use the summary part as the model input in our work.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 5, |
| "end": 12, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dataset Analysis", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "As described in Section 2, we formulate our problem as a text summarization task, so we use popular summarization systems: Pointer Generator (PG) [See et al., 2017] and PEGASUS [Zhang et al., 2019] . We used the training data set to train our model, the validation data set to fine-tune our model in terms of hyper parameters and then evaluated the performance of our model in generating the First Independent Claim (FIN) on the test set. The results obtained are compared with the \"Gold Standard\" that represent the versions of the same FIN constructed by humans (i.e., IP attorneys). The obtained machine-generated results are compared in terms of ROUGE-1, ROUGE-2, and ROUGE-L [Lin, 2004] scores based on the statistical distributions of the results obtained, i.e., the probability density functions (pdf's) of the results.", |
| "cite_spans": [ |
| { |
| "start": 146, |
| "end": 164, |
| "text": "[See et al., 2017]", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 177, |
| "end": 197, |
| "text": "[Zhang et al., 2019]", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 680, |
| "end": 691, |
| "text": "[Lin, 2004]", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "5" |
| }, |
| { |
| "text": "6.1 Evaluation of the generated first claim Table 2 : Average F1 scores of the first independent claim generated with the test set for Rouge-1, Rouge-2, and Rouge-L.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 44, |
| "end": 51, |
| "text": "Table 2", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "6" |
| }, |
| { |
| "text": "It can be observed from Table 2 that the performance of self-attention approach is distinctively higher than the Pointer Generator approach. PE-GASUS works very well on our dataset with average ROUGE scores achieving 60-70%, which is a very high score for the abstractive summarization task in NLP. The state-of-the-art ROUGE score for popular text summarization datasets are among 30-60% [Zhang et al., 2019] . This attests to the fact that using NLP techniques, it is possible to accurately generate the FIN that is arguably the most important claim among all the claims in a patent application. Figure 1 shows the probability distributions of the resulting rouge scores when one uses the Pegasus approach on our test set of 31,644 data points.", |
| "cite_spans": [ |
| { |
| "start": 389, |
| "end": 409, |
| "text": "[Zhang et al., 2019]", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 24, |
| "end": 31, |
| "text": "Table 2", |
| "ref_id": "TABREF0" |
| }, |
| { |
| "start": 598, |
| "end": 606, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Based on the obtained probability distributions (i.e., probability density functions) shown in Figure 1 , and the expected value (mean) of these distributions, one can observe that our chosen model performs very well on the dataset.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 95, |
| "end": 103, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "6" |
| }, |
| { |
| "text": "While the results reported look very promising, our current experiments have the following limitations: 1) We used abstractive summarization to generate the first independent claim which limits the input length. This might be solved by using a combination of extractive summarization, like heuristics, and abstractive summarization; 2) Our results show that our generated claim is mainly a summary of what the inventor writes. However, the claims generated should also establish the novelty of the invention by looking at other patents and/or published papers in the public domain for similar inventions in the same space, instead of only focusing on one single patent application, which requires incorporating external knowledge; 3) Using ROUGE score as the evaluation metric, which focuses on the syntax similarities between the generated claims and the gold standards, might not be sufficient to evaluate the quality of the generated claims. Other aspects, such as semantic similarity, factuality, etc. need to be considered as well. We plan to address these problems in future work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Our results suggest that claim construction and the patent application process can be largely automated in the future with the help of AI, natural language processing, and machine learning. This will have far-reaching consequences such as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "\u2022 democratizing the landscape for innovation and inventions, thus enabling small businesses, underrepresented groups, and individual inventors (in addition to big companies) to file for patents in a much more cost-effective manner to own IP;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "\u2022 expediting the submission and issuing of patent applications dramatically (from 3 or 4 years to less than 1 year), thus making the IP litigation process much more efficient;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "\u2022 facilitating disruptive changes in the IP litigation process by AI where the machine will be able to do some, if not most, of the legal tasks currently performed by humans.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "We hope that our results will stimulate further research into using AI, Natural Language Processing, and Machine Learning for automating the patent application process. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "https://bulkdata.uspto.gov/ 2 https://drive.google.com/drive/u/0/ folders/1J4sAcM_21G39VuZT1jv6RqLTEM_ UngWS", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Cdevalsumm: An empirical study of cross-dataset evaluation for neural summarization systems", |
| "authors": [ |
| { |
| "first": "Yiran", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Pengfei", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhong", |
| "suffix": "" |
| }, |
| { |
| "first": "Zi-Yi", |
| "middle": [], |
| "last": "Dou", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqing", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xipeng", |
| "middle": [], |
| "last": "Qiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xuanjing", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yiran Chen, Pengfei Liu, Ming Zhong, Zi-Yi Dou, Dan- qing Wang, Xipeng Qiu, and Xuanjing Huang. Cde- valsumm: An empirical study of cross-dataset evalu- ation for neural summarization systems, 2020.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "On the use of arxiv as a dataset. CoRR, abs", |
| "authors": [ |
| { |
| "first": "Colin", |
| "middle": [ |
| "B" |
| ], |
| "last": "Clement", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Bierbaum", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [ |
| "P" |
| ], |
| "last": "O'keeffe", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [ |
| "A" |
| ], |
| "last": "Alemi", |
| "suffix": "" |
| } |
| ], |
| "year": 1905, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Colin B. Clement, Matthew Bierbaum, Kevin P. O'Keeffe, and Alexander A. Alemi. On the use of arxiv as a dataset. CoRR, abs/1905.00075, 2019. URL http://arxiv.org/abs/1905.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Automatic text summarization: A comprehensive survey", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Wafaa", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "El-Kassas", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Cherif", |
| "suffix": "" |
| }, |
| { |
| "first": "Ahmed", |
| "middle": [ |
| "A" |
| ], |
| "last": "Salama", |
| "suffix": "" |
| }, |
| { |
| "first": "Hoda K", |
| "middle": [], |
| "last": "Rafea", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mohamed", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Expert Systems with Applications", |
| "volume": "165", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wafaa S El-Kassas, Cherif R Salama, Ahmed A Rafea, and Hoda K Mohamed. Automatic text summariza- tion: A comprehensive survey. Expert Systems with Applications, 165:113679, 2021.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Multi-news: A large-scale multidocument summarization dataset and abstractive hierarchical model", |
| "authors": [ |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Fabbri", |
| "suffix": "" |
| }, |
| { |
| "first": "Irene", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Tianwei", |
| "middle": [], |
| "last": "She", |
| "suffix": "" |
| }, |
| { |
| "first": "Suyi", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Dragomir", |
| "middle": [], |
| "last": "Radev", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1074--1084", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexander Fabbri, Irene Li, Tianwei She, Suyi Li, and Dragomir Radev. Multi-news: A large-scale multi- document summarization dataset and abstractive hi- erarchical model. In Proceedings of the 57th An- nual Meeting of the Association for Computational Linguistics, pages 1074-1084, Florence, Italy, July 2019. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Newsroom: A dataset of 1.3 million summaries with diverse extractive strategies", |
| "authors": [ |
| { |
| "first": "Max", |
| "middle": [], |
| "last": "Grusky", |
| "suffix": "" |
| }, |
| { |
| "first": "Mor", |
| "middle": [], |
| "last": "Naaman", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Artzi", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "708--719", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Max Grusky, Mor Naaman, and Yoav Artzi. News- room: A dataset of 1.3 million summaries with di- verse extractive strategies. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies, Volume 1 (Long Pa- pers), pages 708-719, New Orleans, Louisiana, June 2018. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Newsroom: A dataset of 1.3 million summaries with diverse extractive strategies", |
| "authors": [ |
| { |
| "first": "Max", |
| "middle": [], |
| "last": "Grusky", |
| "suffix": "" |
| }, |
| { |
| "first": "Mor", |
| "middle": [], |
| "last": "Naaman", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Artzi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Max Grusky, Mor Naaman, and Yoav Artzi. News- room: A dataset of 1.3 million summaries with di- verse extractive strategies, 2020.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Rouge: A package for automatic evaluation of summaries", |
| "authors": [ |
| { |
| "first": "Chin-Yew", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Text summarization branches out", |
| "volume": "", |
| "issue": "", |
| "pages": "74--81", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chin-Yew Lin. Rouge: A package for automatic evaluation of summaries. In Text summarization branches out, pages 74-81, 2004.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "PubMed and beyond: a survey of web tools for searching biomedical literature", |
| "authors": [ |
| { |
| "first": "Zhiyong", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Database", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1093/database/baq036" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhiyong Lu. PubMed and beyond: a survey of web tools for searching biomedical literature. Database, 2011, 01 2011. ISSN 1758-0463. doi: 10.1093/database/baq036. URL https://doi.org/10.1093/database/baq036. baq036.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Don't give me the details, just the summary! topic-aware convolutional neural networks for extreme summarization", |
| "authors": [ |
| { |
| "first": "Shashi", |
| "middle": [], |
| "last": "Narayan", |
| "suffix": "" |
| }, |
| { |
| "first": "Shay", |
| "middle": [ |
| "B" |
| ], |
| "last": "Cohen", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shashi Narayan, Shay B. Cohen, and Mirella Lapata. Don't give me the details, just the summary! topic-aware convolutional neural networks for extreme summarization, 2018.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Get to the point: Summarization with pointer-generator networks", |
| "authors": [ |
| { |
| "first": "Abigail", |
| "middle": [], |
| "last": "See", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "J" |
| ], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abigail See, Peter J. Liu, and Christopher D. Manning. Get to the point: Summarization with pointer-generator networks, 2017.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "30", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. Attention is all you need. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 30. Curran Associates, Inc., 2017.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "PEGASUS: pre-training with extracted gap-sentences for abstractive summarization", |
| "authors": [ |
| { |
| "first": "Jingqing", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yao", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Saleh", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "J" |
| ], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jingqing Zhang, Yao Zhao, Mohammad Saleh, and Peter J. Liu. PEGASUS: pre-training with extracted gap-sentences for abstractive summarization. CoRR, abs/1912.08777, 2019.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Probability Distribution of F1 scores for ROUGE-1, ROUGE-2, and ROUGE-L." |
| }, |
| "TABREF0": { |
| "type_str": "table", |
| "content": "<table><tr><td>reports F1 scores of ROUGE-1 (R1),</td></tr><tr><td>ROUGE-2 (R2), ROUGE-L (RL) for all models.</td></tr></table>", |
| "num": null, |
| "text": "", |
| "html": null |
| } |
| } |
| } |
| } |