| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T14:46:51.146564Z" |
| }, |
| "title": "A Free Format Legal Question Answering System", |
| "authors": [ |
| { |
| "first": "Soha", |
| "middle": [], |
| "last": "Khazaeli", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Janardhana", |
| "middle": [], |
| "last": "Punuru", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Chad", |
| "middle": [], |
| "last": "Morris", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Sanjay", |
| "middle": [], |
| "last": "Sharma", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Bert", |
| "middle": [], |
| "last": "Staub", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Cole", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Sunny", |
| "middle": [], |
| "last": "Chiu-Webster", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "sunnycw1@gmail.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We present an information retrieval-based question answer system to answer legal questions. The system is not limited to a predefined set of questions or patterns and uses both sparse vector search and embeddings for input to a BERT-based answer re-ranking system. A combination of general domain and legal domain data is used for training. This natural question answering system is in production and is used commercially.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We present an information retrieval-based question answer system to answer legal questions. The system is not limited to a predefined set of questions or patterns and uses both sparse vector search and embeddings for input to a BERT-based answer re-ranking system. A combination of general domain and legal domain data is used for training. This natural question answering system is in production and is used commercially.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Question answering (QA) applications range from simple yes/no systems to complex questions where answers might be synthesized from several sources (Voorhees and Tice, 2000) . This work concerns a QA system for legal research. The system is designed to answer both factoid (Agichtein et al., 2005) and non-factoid questions. A short answer can satisfy factoid questions, e.g., \"What is the burden of proof for breach of contract?\". In contrast, non-factoid questions are open-ended and an adequate answer needs opinions or explanations (Hashemi et al., 2020) , e.g., \"Why does child support increase with income?\".", |
| "cite_spans": [ |
| { |
| "start": 147, |
| "end": 172, |
| "text": "(Voorhees and Tice, 2000)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 272, |
| "end": 296, |
| "text": "(Agichtein et al., 2005)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 535, |
| "end": 557, |
| "text": "(Hashemi et al., 2020)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "A typical system user is a litigator seeking answers to case-specific legal questions. Those answers inform the creation of litigation documents, such as pleadings, briefs, and motions. A system should provide complete multiple-sentence answers with context that can be cited. A legal QA system must also handle questions where no single answer exists. For example, the useful answer may be jurisdiction specific, or be time frame dependent because the law evolves. Importantly, the best answers can depend on the lawyer's perspective because application of the law can make fine distinctions per the case facts, or recognizes competing principles or mitigating factors. Legal practice areas can have distinctive concerns. For example, the scope of legal principles and practice affecting family law legal matters is distinguishable from those applied in bankruptcy law. Developing an effective and useful question answering system in this setting faces state of the art challenges, including creation of training collections and performance metrics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We present a retrieval-based legal domain QA system designed to provide useful answers for all legal practice areas. It is designed for customers with real-world tasks while meeting cost, response time, and scalability constraints. This system is in production and serving customers. We also describe an experiment methodology found to have pragmatic value in system development. The methodology can be useful in domains with similar context-laden QA characteristics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Recent deep learning open domain QA research successfully applied a retrieve and read paradigm. The retrieval step selects candidate documents, then a reading component finds answers (Chen et al., 2017; Das et al., 2019; Yang et al., 2019) . We adopted a similar approach.", |
| "cite_spans": [ |
| { |
| "start": 183, |
| "end": 202, |
| "text": "(Chen et al., 2017;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 203, |
| "end": 220, |
| "text": "Das et al., 2019;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 221, |
| "end": 239, |
| "text": "Yang et al., 2019)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Often, the systems employ standard retrieval methods based on sparse vector space approaches like TF-IDF (Jones, 1972) and BM25 (Robertson and Sp\u00e4rck Jones, 1994) . Dense vector representations with distributional semantic properties based on LSA (Landauer et al., 1998) , GLoVe (Pennington et al., 2014) , and sentence embedding (Reimers and Gurevych, 2019) are also used. Custom domain embedding methods such as Legalbert have been studied (Chalkidis et al., 2020) . The QA retrieval target is usually at the passage level rather than complete documents (Luan et al., 2021) . Paragraph-based legal domain search has been studied (Zhang and Steiner, 2018) . Answer extraction techniques have used Machine Reading Comprehension (MRC) (Seo et al., 2016) and DrQA (Chen et al., 2017) . Several ranking models have been ap- (Yang et al., 2019; Nogueira and Cho, 2019) . One COLIEE 1 task is to answer yes/no legal questions and retrieve germane legal documents. Competitors have used TF-IDF and BM25, and also contextual embedding vectors such as BERT and ELMo, to score passages against the question (Rabelo et al., 2019) . Lex-isNexis legacy answer cards address knowledgebased QA by detecting the user query intent and then serving a previously mined answer if appropriate (Kumar and Politi, 2019; Shankar and Buddarapu, 2018) . The WestSearch QA system categorizes non-factoid questions to different frames. To answer a frame-specific question, a trained framespecific question-answer pair classifier is used to recognize a retrieved passage as an answer (McElvain et al., 2019) . Both of these commercial legal QA system can only handle a limited range of questions.", |
| "cite_spans": [ |
| { |
| "start": 105, |
| "end": 118, |
| "text": "(Jones, 1972)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 150, |
| "end": 162, |
| "text": "Jones, 1994)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 243, |
| "end": 270, |
| "text": "LSA (Landauer et al., 1998)", |
| "ref_id": null |
| }, |
| { |
| "start": 279, |
| "end": 304, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 330, |
| "end": 358, |
| "text": "(Reimers and Gurevych, 2019)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 442, |
| "end": 466, |
| "text": "(Chalkidis et al., 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 556, |
| "end": 575, |
| "text": "(Luan et al., 2021)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 631, |
| "end": 656, |
| "text": "(Zhang and Steiner, 2018)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 734, |
| "end": 752, |
| "text": "(Seo et al., 2016)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 762, |
| "end": 781, |
| "text": "(Chen et al., 2017)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 821, |
| "end": 840, |
| "text": "(Yang et al., 2019;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 841, |
| "end": 864, |
| "text": "Nogueira and Cho, 2019)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 1098, |
| "end": 1119, |
| "text": "(Rabelo et al., 2019)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 1273, |
| "end": 1297, |
| "text": "(Kumar and Politi, 2019;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 1298, |
| "end": 1326, |
| "text": "Shankar and Buddarapu, 2018)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 1556, |
| "end": 1579, |
| "text": "(McElvain et al., 2019)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In contrast to the previous work, the system presented here is designed to answer almost all legal content questions without legal practice area restrictions. In particular, the question coverage is not limited by pattern or frame.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The system selects answers by re-ranking search results obtained using both sparse vector techniques (BM25) and a dense vector approach (semantic embedding). Figure 1 shows a simplified system architecture. The search repository contains the passage text and passage embeddings. The search engine retrieves passages by text and embedding similarity. The answer finder re-ranks the retrieved passages.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 158, |
| "end": 166, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "3" |
| }, |
| { |
| "text": "1 Competition on Legal Information Extraction/Entailment", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The search collection is a pool of passages that highlight key aspects of each legal document. It consists of case-law Headnotes 2 and RFCs 3 . The collection contains over 100 million passages. Our experiments were conducted with a 10% sample of the collection. The search engine has the usual goal of retrieving relevant passages. Beyond topical germaneness, the retrieval engine must also detect sufficient context in the passage. To that end, the retrieval answer set coverage is enriched using both sparse vector and semantic embedding passage representations. The system used a Query By Document method (QBD) (Yang et al., 2018) , more like this, implemented with BM25 for sparse vector passage representation retrieval. The dense embedding enrichment used Legal GloVe and Legal Siamese BERT embeddings. The GloVe embeddings are built using 64GB of legal text with a 300K word vocabulary and 200 dimensions. Passages and questions are encoded using the average of the word embeddings.", |
| "cite_spans": [ |
| { |
| "start": 615, |
| "end": 634, |
| "text": "(Yang et al., 2018)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Retrieving Passages", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The Siamese Legal BERT system is trained to retrieve similar passages in a contextual vector space (Reimers and Gurevych, 2019) . The training data is a sample of 100,000 headnotes. Given a headnote, the most similar headnote using BM25 is identified as a positive similar passage. Five random headnotes are added as negative instances. We ensured discriminative challenge amongst the negative instances using a procedure described below. The system was trained using a regression objective function with cosine loss. The input sentence embedding uses the Legal BERT base model with mean pooling of the tokens embedding. The Legal BERT model was trained in-house from scratch with a custom legal vocabulary on the last 20 years of US case-law documents. The model is trained with train_batch_size = 16. We used Spearman and Pearson correlation upward trends as convergence indicators.", |
| "cite_spans": [ |
| { |
| "start": 99, |
| "end": 127, |
| "text": "(Reimers and Gurevych, 2019)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Retrieving Passages", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The answer finder accepts a question passage pair and computes the probability the passage answers the question. A BERT sequence binary classifier is trained on questionanswer pairs. The answer finder input is the concatenation of question(Q) and passage(P ) as \"[CLS]<Q>[SEP]<P>[SEP]\". answer finder is trained by fine tuning Legal BERT. The BERT classifier uses [CLS] representation with two fully connected layers with a final softmax layer.", |
| "cite_spans": [ |
| { |
| "start": 364, |
| "end": 369, |
| "text": "[CLS]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Answer Finder", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The nature of legal language and the high-stakes nature of lawyer tasks require subject matter experts (SMEs) to judge legal QA system performance. The legal training data was created inhouse by certified lawyers. The training dataset had over 10,000 annotated legal question answer pairs covering legal practice areas. Questions with long paragraph answers (107,089) are selected from Natural Questions (NQ) (Kwiatkowski et al., 2019) and added to training data to improve generalization.", |
| "cite_spans": [ |
| { |
| "start": 409, |
| "end": 435, |
| "text": "(Kwiatkowski et al., 2019)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Answer Finder", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The system was fine-tuned in two stages. First, question-answer pairs were created by selecting a random negative passage for each question. The tuned answer finder then predicts the probability on all negative samples. In the second stage, for each question the negative answer with the highest probability of being a good answer is selected. The goal is to increase the challenge of good answer discrimination. The validation set is real-world questions extracted from user-logs. The training hyper-parameters are: learning_rate : 2e \u2212 5, max_seq_length : 512, num_train_epochs : 3, do_lower_case : T rue, batch_size = 8. We found that examination of the first 128 wordpieces (max_seq_length : 128) doesn't significantly lower validation set accuracy, so that was used in large batch experiments. Reduced max_seq_length also improves latency performance in production. Table 1 shows the answer evaluation scale used by the subject matter experts (SMEs). Simple \"yes/no\" judgments are clearly inadequate for complex domain QA systems.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 871, |
| "end": 878, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Answer Finder", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Two test sets were used: 100 and 1000 questions. The small set proved valuable for rapid development cycles. Both sets consist of 50% actual user questions with additional questions from SMEs to ensure coverage of legal research goals including content, entity, and analytic questions. The QA system presented here focuses on answering content questions and may not provide good answers for en-Score Criteria -1 so unrelated that a user will lose patience with the system ('silly') 0 off point and is not reasonably related 1 right topic but not an answer 2 partial answer 3 good answer Table 1 : Answer evaluation scale tity and analytic questions. This limitation affects the performance for the smaller test set reported in Table 3 . The performance of the system using the large test set are reported in Appendix A. The search corpus was bulk embedded and queried using BM25, Siamese Legal BERT, and Legal GloVe. Similarity calculations use the Euclidean metric.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 587, |
| "end": 594, |
| "text": "Table 1", |
| "ref_id": null |
| }, |
| { |
| "start": 727, |
| "end": 734, |
| "text": "Table 3", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The input to answer finder is the top 30 retrieved passages for each retrieval method. answer finder re-ranks the list by answer probability and selects the top 3 passages. The top 3 passages for each method were evaluated. SMEs evaluated these answer sets in a random order. Table 2 shows retrieved passages, system answers, and the SME evaluation for \"Is an airline liable for its pilot's negligence?\" BM25_MLT picked a long passage with multiple occurrences of 'airline', 'pilot', 'liable' and 'negligence'. It was judged as off point, despite some topical overlap. Legal GloVe and Legal Siamese BERT picked a semantically-similar short passage even though 'pilot' does not appear. It was judged as topical but not an answer. answer finder on BM25_MLT picked a good answer passage with all the question elements and actors, and also discusses the conditions in which the carrier is liable for a pilot's negligence. This passage was promoted from rank 27 of the 30 BM25_MLT passages.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 276, |
| "end": 283, |
| "text": "Table 2", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The answer evaluation scale has five levels and the SMEs agreed that an acceptable answer will score a '2' or '3'. SME evaluator interrater agreement (IRR) is excellent, given the complexity of legal question answering task, with an agreement level of 84% for acceptable answers and 91% for unacceptable answers. Setting a threshold of '2' in the test set, the system performance was: F1 0.766, precision 0.827, recall: 0.713, and accuracy 0.759. F1 and accuracy provide classifier performance metrics. The production legal research system presents the top three answers to users, so we also use ranked search results evaluation metrics Is an airline liable for its pilot's negligence? SME label Method A carrier would not be liable for an error of judgment of the pilot, not constituting positive negligence on his part in exercising such judgment; but liability is incurred if the pilot, by his negligent and careless conduct, has created a situation requiring the formation of a judgment and then errs in the exercise thereof.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "An airline corporation is not an insurer of the safety of its passengers. The liability of an airline corporation must be based on negligence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "AF on BM25_MLT", |
| "sec_num": "3" |
| }, |
| { |
| "text": "1 L_GloVe, SL_BERT Airline pilot who was accused of raping flight attendant has no tort claim against airline based upon its alleged negligent investigation of accusation, even if airline's policy of investigating sexual harassment complaints creates duty to use due care in conducting investigation,...", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "AF on BM25_MLT", |
| "sec_num": "3" |
| }, |
| { |
| "text": "to assess the system performance with DCG (Discounted Cumulative Gain) and MRR (Mean Reciprocal Rank).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BM25_MLT", |
| "sec_num": "0" |
| }, |
| { |
| "text": "In this system answer finder is employed as a re-ranker, but it could also be used as a threshold filter on the answers. Table 3 compares alternative combinations of system components.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 121, |
| "end": 128, |
| "text": "Table 3", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "BM25_MLT", |
| "sec_num": "0" |
| }, |
| { |
| "text": "Promising system combinations are bolded. Production systems have practical constraints and cannot simply optimize performance without considering factors like user experience, cost, and scalability. Re-ranking retrieved passages by each retrieval method increased the average DCG, confirming the value of adding answer finder to the system. Another advantage of the re-ranker is the ability to combine multiple retrieval methods to improve DCG performance. Combining dense and sparse retrieved passages both increases system cost and creates scaling challenges. However, the richer representation can improve user experience because users are able to ask their question using a greater variety of words. Another advantage of using a reranker is the capacity to apply a threshold to help filter unrelated passages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BM25_MLT", |
| "sec_num": "0" |
| }, |
| { |
| "text": "Case-specific error-analysis helps to identify shortcomings in the training data coverage. Categorized DCG analysis indicates the QA system provides good answers for well-defined legal questions such as standard of review 4 question (e.g. What is standard of review for marital property allocation decision in Florida?) . Single topic questions are free format questions about specific legal issue, e.g \"Can an executor compromise a claim without beneficiary consent?\". The QA system performs significantly better than previous systems on single topic questions, although there remains room for improvement. Optimizing the real world performance of a legal QA system needs to consider the frequency of each question category, the space for improvement, and the question category importance judged by SMEs. We identified 16 categories to prioritize for high return on development investment. Single topic question is at the top of the list. Training data for this category will be created to improve system accuracy. Table 4 reports the average DCG@3 for seven high priority question categories.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1016, |
| "end": 1023, |
| "text": "Table 4", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Error Analysis", |
| "sec_num": "5" |
| }, |
| { |
| "text": "This paper presents a new legal domain QA system that is deployed and serving customers. Legal QA systems must address a specialized domain language and provide contextualized answers in a high-stakes setting. We were able to develop a performant product using a 10% content sample and 100 questions drawn from previously seen realworld queries supplemented with expert-generated questions. This proved effective to evaluate alternative embedding methods and performance tradeoffs for combination of fast retrieval system on all corpus and relatively slow re-ranking system on limited retrieved passages. The sampled collection and smaller question set enabled rapid design and test cycles. Later evaluation using the full collection and a much larger question set confirmed the 9.76 7 a Avg for answered questions in the category. findings. The system is oriented to answering content questions and performs lower on entity and analytic questions. We intend to address this limitation by developing alternative approaches to handle other question types that can be combined with this content-oriented system.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Various development directions are being explored. Direct extensions of this work include use of a smaller Siamese BERT, vocabulary improvements for the models, training set enhancements targeting low performing legal areas, and training the answer finder component on augmented training data and smaller Legal-BERT models. additional filters are also implemented. Nonetheless, the method performance ranking is unchanged as compared to that using the smaller test collection and question sets. This confirms the usefulness of work with the smaller sets for development and tuning.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We also investigated answer variations for retrieval methods. Table 6 shows the distribution of surfaced answers, i.e. presented in the product UI, and customer-engaged answers by retrieval query method. Both retrieval methods provide passages recognized by the system as worthy answers. If an answer was retrieved by both retrieval methods there is a higher probability of user engagement. Figure 2 shows a screen-shot of a product UI for the production QA system. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 62, |
| "end": 69, |
| "text": "Table 6", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 391, |
| "end": 399, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussion and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "A LexisNexis headnote is a point of law expressed in a case written by a judge which is picked and edited by editors as a general point of law.3 An RFC (Reason For Citing) is an automatically extracted passage of a case which contains sentences near a document citation, such as a court case citation, that suggest the Reason(s) For Citing (RFC)(Humphrey et al., 2005).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The deference an appeals court will apply to a decision of a lower court http://cdn.ca9.uscourts. gov/datastore/uploads/guides/stand_of_ review/I_Definitions.html", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "The complete production QA system includes important components not described here. It was developed by the authors and Shyjee Mathai, Aaron ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| }, |
| { |
| "text": "The production QA system is evaluated periodically on the larger 1000 question set. These question collections were created by legal domain experts to be representative of various practice areas and different question types. Each experiment requires evaluation of tens of thousands of questionanswer pairs. Table 5 shows higher DCG compared to the internal evaluation because the production system is based on the complete content set. Some", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 307, |
| "end": 314, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "A Some Analysis on the Production System", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Analysis of factoid questions for effective relation extraction", |
| "authors": [ |
| { |
| "first": "Eugene", |
| "middle": [], |
| "last": "Agichtein", |
| "suffix": "" |
| }, |
| { |
| "first": "Silviu", |
| "middle": [], |
| "last": "Cucerzan", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Brill", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 28th annual international ACM SIGIR conference", |
| "volume": "", |
| "issue": "", |
| "pages": "567--568", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eugene Agichtein, Silviu Cucerzan, and Eric Brill. 2005. Analysis of factoid questions for effective re- lation extraction. In Proceedings of the 28th annual international ACM SIGIR conference, pages 567- 568.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Prodromos Malakasiotis", |
| "authors": [ |
| { |
| "first": "Ilias", |
| "middle": [], |
| "last": "Chalkidis", |
| "suffix": "" |
| }, |
| { |
| "first": "Manos", |
| "middle": [], |
| "last": "Fergadiotis", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Nikolaos Aletras, and Ion Androutsopoulos. 2020. Legal-bert: The muppets straight out of law school", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2010.02559" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilias Chalkidis, Manos Fergadiotis, Prodromos Malaka- siotis, Nikolaos Aletras, and Ion Androutsopoulos. 2020. Legal-bert: The muppets straight out of law school. arXiv preprint arXiv:2010.02559.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Reading wikipedia to answer opendomain questions", |
| "authors": [ |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Fisch", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the ACL", |
| "volume": "1", |
| "issue": "", |
| "pages": "1870--1879", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Danqi Chen, Adam Fisch, Jason Weston, and Antoine Bordes. 2017. Reading wikipedia to answer open- domain questions. In Proceedings of the 55th An- nual Meeting of the ACL (Volume 1: Long Papers), pages 1870-1879.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Multi-step retriever-5 Asks about applicability of a legal concept on another one e.g. Is the continuous treatment doctrine applicable to negligence claims in Ohio? 6 The time limit for", |
| "authors": [ |
| { |
| "first": "Rajarshi", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "Shehzaad", |
| "middle": [], |
| "last": "Dhuliawala", |
| "suffix": "" |
| }, |
| { |
| "first": "Manzil", |
| "middle": [], |
| "last": "Zaheer", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rajarshi Das, Shehzaad Dhuliawala, Manzil Zaheer, and Andrew McCallum. 2019. Multi-step retriever- 5 Asks about applicability of a legal concept on another one e.g. Is the continuous treatment doctrine applicable to negligence claims in Ohio? 6 The time limit for plaintiff to file a com- plaint https://www.nolo.com/dictionary/ statute-of-limitations-term.html 7 Essential requirements to make a claim https://www. law.cornell.edu/wex/element e.g. What are the el- ements of constructive eviction? reader interaction for scalable open-domain question answering. In International Conference on Learn- ing Representations.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the NAACL: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understand- ing. In Proceedings of the 2019 Conference of the NAACL: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Antique: A nonfactoid question answering benchmark", |
| "authors": [ |
| { |
| "first": "Helia", |
| "middle": [], |
| "last": "Hashemi", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Aliannejadi", |
| "suffix": "" |
| }, |
| { |
| "first": "Hamed", |
| "middle": [], |
| "last": "Zamani", |
| "suffix": "" |
| }, |
| { |
| "first": "W Bruce", |
| "middle": [], |
| "last": "Croft", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "European Conference on Information Retrieval", |
| "volume": "", |
| "issue": "", |
| "pages": "166--173", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Helia Hashemi, Mohammad Aliannejadi, Hamed Za- mani, and W Bruce Croft. 2020. Antique: A non- factoid question answering benchmark. In Euro- pean Conference on Information Retrieval, pages 166-173. Springer.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Automated system and method for generating reasons that a court case is cited", |
| "authors": [ |
| { |
| "first": "Timothy", |
| "middle": [ |
| "L" |
| ], |
| "last": "Humphrey", |
| "suffix": "" |
| }, |
| { |
| "first": "Xin", |
| "middle": [ |
| "Allan" |
| ], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Afsar", |
| "middle": [], |
| "last": "Parhizgar", |
| "suffix": "" |
| }, |
| { |
| "first": "Salahuddin", |
| "middle": [], |
| "last": "Ahmed", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [ |
| "S" |
| ], |
| "last": "Wiltshire", |
| "suffix": "Jr" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "T" |
| ], |
| "last": "Morelock", |
| "suffix": "" |
| }, |
| { |
| "first": "Joseph", |
| "middle": [ |
| "P" |
| ], |
| "last": "Harmon", |
| "suffix": "" |
| }, |
| { |
| "first": "Spiro", |
| "middle": [ |
| "G" |
| ], |
| "last": "Collias", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "US Patent", |
| "volume": "6", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Timothy L Humphrey, Xin Allan Lu, Afsar Parhizgar, Salahuddin Ahmed, James S Wiltshire Jr, John T Morelock, Joseph P Harmon, Spiro G Collias, and Paul Zhang. 2005. Automated system and method for generating reasons that a court case is cited. US Patent 6,856,988.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "A statistical interpretation of term specificity and its application in retrieval", |
| "authors": [ |
| { |
| "first": "Karen Sparck", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| } |
| ], |
| "year": 1972, |
| "venue": "Journal of documentation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karen Sparck Jones. 1972. A statistical interpretation of term specificity and its application in retrieval. Journal of documentation.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Understanding user query intent and target terms in legal domain", |
| "authors": [ |
| { |
| "first": "Sachin", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Regina", |
| "middle": [], |
| "last": "Politi", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "International Conference on Applications of Natural Language to Information Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "41--53", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sachin Kumar and Regina Politi. 2019. Understanding user query intent and target terms in legal domain. In International Conference on Applications of Nat- ural Language to Information Systems, pages 41-53. Springer.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Natural questions: a benchmark for question answering research", |
| "authors": [ |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Kwiatkowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Jennimaria", |
| "middle": [], |
| "last": "Palomaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Olivia", |
| "middle": [], |
| "last": "Redfield", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| }, |
| { |
| "first": "Ankur", |
| "middle": [], |
| "last": "Parikh", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Alberti", |
| "suffix": "" |
| }, |
| { |
| "first": "Danielle", |
| "middle": [], |
| "last": "Epstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Transactions of the ACL", |
| "volume": "7", |
| "issue": "", |
| "pages": "453--466", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom Kwiatkowski, Jennimaria Palomaki, Olivia Red- field, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Jacob Devlin, Kenton Lee, et al. 2019. Natural questions: a bench- mark for question answering research. Transactions of the ACL, 7:453-466.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "An introduction to latent semantic analysis", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [ |
| "K" |
| ], |
| "last": "Landauer", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "W" |
| ], |
| "last": "Foltz", |
| "suffix": "" |
| }, |
| { |
| "first": "Darrell", |
| "middle": [], |
| "last": "Laham", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Discourse processes", |
| "volume": "25", |
| "issue": "", |
| "pages": "259--284", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas K Landauer, Peter W Foltz, and Darrell Laham. 1998. An introduction to latent semantic analysis. Discourse processes, 25(2-3):259-284.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Sparse, dense, and attentional representations for text retrieval", |
| "authors": [ |
| { |
| "first": "Yi", |
| "middle": [], |
| "last": "Luan", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Eisenstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Transactions of the ACL", |
| "volume": "9", |
| "issue": "", |
| "pages": "329--345", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yi Luan, Jacob Eisenstein, Kristina Toutanova, and Michael Collins. 2021. Sparse, dense, and atten- tional representations for text retrieval. Transactions of the ACL, 9:329-345.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Westsearch plus: A non-factoid question-answering system for the legal domain", |
| "authors": [ |
| { |
| "first": "Gayle", |
| "middle": [], |
| "last": "Mcelvain", |
| "suffix": "" |
| }, |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Sanchez", |
| "suffix": "" |
| }, |
| { |
| "first": "Sean", |
| "middle": [], |
| "last": "Matthews", |
| "suffix": "" |
| }, |
| { |
| "first": "Don", |
| "middle": [], |
| "last": "Teo", |
| "suffix": "" |
| }, |
| { |
| "first": "Filippo", |
| "middle": [], |
| "last": "Pompili", |
| "suffix": "" |
| }, |
| { |
| "first": "Tonya", |
| "middle": [], |
| "last": "Custis", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 42nd International ACM SIGIR Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "1361--1364", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gayle McElvain, George Sanchez, Sean Matthews, Don Teo, Filippo Pompili, and Tonya Custis. 2019. Westsearch plus: A non-factoid question-answering system for the legal domain. In Proceedings of the 42nd International ACM SIGIR Conference, pages 1361-1364.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Passage re-ranking with bert", |
| "authors": [ |
| { |
| "first": "Rodrigo", |
| "middle": [], |
| "last": "Nogueira", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1901.04085" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rodrigo Nogueira and Kyunghyun Cho. 2019. Pas- sage re-ranking with bert. arXiv preprint arXiv:1901.04085.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Glove: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D Manning. 2014. Glove: Global vectors for word rep- resentation. In Proceedings of the 2014 conference on empirical methods in natural language process- ing (EMNLP), pages 1532-1543.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "A summary of the coliee 2019 competition", |
| "authors": [ |
| { |
| "first": "Juliano", |
| "middle": [], |
| "last": "Rabelo", |
| "suffix": "" |
| }, |
| { |
| "first": "Mi-Young", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Randy", |
| "middle": [], |
| "last": "Goebel", |
| "suffix": "" |
| }, |
| { |
| "first": "Masaharu", |
| "middle": [], |
| "last": "Yoshioka", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshinobu", |
| "middle": [], |
| "last": "Kano", |
| "suffix": "" |
| }, |
| { |
| "first": "Ken", |
| "middle": [], |
| "last": "Satoh", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "JSAI International Symposium on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "34--49", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Juliano Rabelo, Mi-Young Kim, Randy Goebel, Masa- haru Yoshioka, Yoshinobu Kano, and Ken Satoh. 2019. A summary of the coliee 2019 competition. In JSAI International Symposium on Artificial Intel- ligence, pages 34-49. Springer.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Sentencebert: Sentence embeddings using siamese bertnetworks", |
| "authors": [ |
| { |
| "first": "Nils", |
| "middle": [], |
| "last": "Reimers", |
| "suffix": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "3973--3983", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nils Reimers and Iryna Gurevych. 2019. Sentence- bert: Sentence embeddings using siamese bert- networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natu- ral Language Processing (EMNLP-IJCNLP), pages 3973-3983.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Simple, proven approaches to text retrieval", |
| "authors": [ |
| { |
| "first": "Stephen", |
| "middle": [ |
| "E" |
| ], |
| "last": "Robertson", |
| "suffix": "" |
| }, |
| { |
| "first": "Karen", |
| "middle": [ |
| "Sp\u00e4rck" |
| ], |
| "last": "Jones", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stephen E Robertson and Karen Sp\u00e4rck Jones. 1994. Simple, proven approaches to text retrieval. Techni- cal report, University of Cambridge, Computer Lab- oratory.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Bidirectional attention flow for machine comprehension", |
| "authors": [ |
| { |
| "first": "Minjoon", |
| "middle": [], |
| "last": "Seo", |
| "suffix": "" |
| }, |
| { |
| "first": "Aniruddha", |
| "middle": [], |
| "last": "Kembhavi", |
| "suffix": "" |
| }, |
| { |
| "first": "Ali", |
| "middle": [], |
| "last": "Farhadi", |
| "suffix": "" |
| }, |
| { |
| "first": "Hannaneh", |
| "middle": [], |
| "last": "Hajishirzi", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minjoon Seo, Aniruddha Kembhavi, Ali Farhadi, and Hannaneh Hajishirzi. 2016. Bidirectional attention flow for machine comprehension. In International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Deep ensemble learning for legal query understanding", |
| "authors": [ |
| { |
| "first": "Arunprasath", |
| "middle": [], |
| "last": "Shankar", |
| "suffix": "" |
| }, |
| { |
| "first": "Venkata", |
| "middle": [ |
| "Nagaraju" |
| ], |
| "last": "Buddarapu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of CIKM 2018 Workshop on Legal Data Analytics and Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Arunprasath Shankar and Venkata Nagaraju Bud- darapu. 2018. Deep ensemble learning for legal query understanding. In Proceedings of CIKM 2018 Workshop on Legal Data Analytics and Mining (LeDAM 2018), CEUR-WS. org.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Building a question answering test collection", |
| "authors": [ |
| { |
| "first": "Ellen", |
| "middle": [ |
| "M" |
| ], |
| "last": "Voorhees", |
| "suffix": "" |
| }, |
| { |
| "first": "Dawn", |
| "middle": [ |
| "M" |
| ], |
| "last": "Tice", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Proceedings of the 23rd annual international ACM SIGIR conference", |
| "volume": "", |
| "issue": "", |
| "pages": "200--207", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ellen M Voorhees and Dawn M Tice. 2000. Building a question answering test collection. In Proceedings of the 23rd annual international ACM SIGIR confer- ence, pages 200-207.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Retrieval and richness when querying by document", |
| "authors": [ |
| { |
| "first": "Eugene", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "D" |
| ], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Ophir", |
| "middle": [], |
| "last": "Frieder", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "A" |
| ], |
| "last": "Grossman", |
| "suffix": "" |
| }, |
| { |
| "first": "Roman", |
| "middle": [], |
| "last": "Yurchak", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "DE-SIRES", |
| "volume": "", |
| "issue": "", |
| "pages": "68--75", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eugene Yang, David D Lewis, Ophir Frieder, David A Grossman, and Roman Yurchak. 2018. Retrieval and richness when querying by document. In DE- SIRES, pages 68-75.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "End-to-end open-domain question answering with bertserini", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuqing", |
| "middle": [], |
| "last": "Xie", |
| "suffix": "" |
| }, |
| { |
| "first": "Aileen", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Xingyu", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Luchen", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "Kun", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the NAACL (Demonstrations)", |
| "volume": "", |
| "issue": "", |
| "pages": "72--77", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Yang, Yuqing Xie, Aileen Lin, Xingyu Li, Luchen Tan, Kun Xiong, Ming Li, and Jimmy Lin. 2019. End-to-end open-domain question answering with bertserini. In Proceedings of the 2019 Conference of the NAACL (Demonstrations), pages 72-77.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Systems and methods for paragraph-based document searching", |
| "authors": [ |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Steiner", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "US Patent", |
| "volume": "10", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paul Zhang and David Steiner. 2018. Systems and methods for paragraph-based document searching. US Patent 10,002,196.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Two stage architecture for QA systems plied in QA systems", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "FIGREF1": { |
| "text": "Customer facing application using the discussed components", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "TABREF0": { |
| "content": "<table/>", |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "text": "Example question and passages (BM25_MLT: BM25 more-like-this, SL_BERT: Siamese Legal BERT, L_GloVe: Legal GloVe, AF: Answer Finder)" |
| }, |
| "TABREF1": { |
| "content": "<table><tr><td>Method</td><td colspan=\"2\">DCG@3 a 95% C.I. b</td><td>N silly c</td><td colspan=\"2\">Answered d MRR@3 a</td></tr><tr><td>BM25_MLT</td><td>4.052</td><td>-</td><td>7</td><td>100</td><td>0.411</td></tr><tr><td>SL_BERT</td><td>3.386</td><td>1.26</td><td>2</td><td>100</td><td>0.326</td></tr><tr><td>L_GloVe</td><td>2.855</td><td>1.25</td><td>7</td><td>100</td><td>0.285</td></tr><tr><td>AF BM25</td><td>5.464</td><td>1.43</td><td>7</td><td>100</td><td>0.493</td></tr><tr><td>AF SL_BERT</td><td>4.862</td><td>1.43</td><td>0</td><td>100</td><td>0.416</td></tr><tr><td>AF L_GloVe</td><td>4.281</td><td>1.40</td><td>7</td><td>100</td><td>0.397</td></tr><tr><td>AF (BM25, SL_BERT)</td><td>5.605</td><td>1.47</td><td>5</td><td>100</td><td>0.483</td></tr><tr><td>AF (BM25, L_GloVe)</td><td>5.502</td><td>1.47</td><td>8</td><td>100</td><td>0.481</td></tr><tr><td colspan=\"2\">AF (BM25, SL_BERT, L_GloVe) 5.533</td><td>1.45</td><td>6</td><td>100</td><td>0.492</td></tr><tr><td>AF 0.2 (BM25, SL_BERT)</td><td>6.269</td><td>1.52</td><td>2</td><td>89</td><td>0.543</td></tr></table>", |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "text": "QA metrics for methods (100 questions)(BM25_MLT: BM25 more-like-this, SL_BERT: Siamese Legal BERT, L_GloVe: Legal GloVe, AF: Answer Finder, AF 0.2: Answer Finder as an answer filter with threshold 0.2)" |
| }, |
| "TABREF2": { |
| "content": "<table><tr><td colspan=\"3\">: Seven high priority question categories on</td></tr><tr><td>1000 questions</td><td/><td/></tr><tr><td>Category</td><td>DCG@3 a</td><td>Priority</td></tr><tr><td>Single topic questions</td><td>6.99</td><td>1</td></tr><tr><td>Questions about rules</td><td>5.46</td><td>2</td></tr><tr><td>or statutes</td><td/><td/></tr><tr><td colspan=\"2\">Relationship questions 5 7.58</td><td>3</td></tr><tr><td>Definitions</td><td>7.11</td><td>4</td></tr><tr><td>Statute of limitations 6</td><td>9.23</td><td>5</td></tr><tr><td>Standard of review</td><td>11.52</td><td>6</td></tr><tr><td>Elements 7</td><td/><td/></tr></table>", |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "text": "" |
| }, |
| "TABREF3": { |
| "content": "<table><tr><td>Method</td><td>Avg</td></tr><tr><td/><td>DCG@3</td></tr><tr><td>BM25_MLT</td><td>4.82</td></tr><tr><td>SL_BERT</td><td>3.62</td></tr><tr><td>AF on BM25</td><td>6.27</td></tr><tr><td>AF on SL_BERT</td><td>5.18</td></tr><tr><td>AF on BM25 + SL_BERT</td><td>6.28</td></tr><tr><td>AF on BM25 + L_GloVe</td><td>6.18</td></tr><tr><td>AF 0.3 on BM25 + SL_BERT</td><td>7.75</td></tr></table>", |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "text": "Production system evaluation (1000 questions) (April 2020)" |
| }, |
| "TABREF4": { |
| "content": "<table><tr><td colspan=\"3\">: Answer distribution by retrieval methods (May</td></tr><tr><td>2021)</td><td/><td/></tr><tr><td colspan=\"3\">Answers BM25_MLT Si_L_Bert Both</td></tr><tr><td>surfaced 58.85%</td><td>31.53%</td><td>9.62%</td></tr><tr><td>engaged 56.48%</td><td>29.02%</td><td>14.50%</td></tr></table>", |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "text": "" |
| } |
| } |
| } |
| } |