| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T02:10:54.044243Z" |
| }, |
| "title": "Information Extraction from Legal Documents: A Study in the Context of Common Law Court Judgements", |
| "authors": [ |
| { |
| "first": "Meladel", |
| "middle": [], |
| "last": "Mistica", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Melbourne", |
| "location": { |
| "country": "Australia" |
| } |
| }, |
| "email": "misticam@unimelb.edu.au" |
| }, |
| { |
| "first": "Geordie", |
| "middle": [ |
| "Z" |
| ], |
| "last": "Zhang", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "geordie.zhang@unimelb.edu.au" |
| }, |
| { |
| "first": "Hui", |
| "middle": [], |
| "last": "Chia", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "chia.h@unimelb.edu.au" |
| }, |
| { |
| "first": "Kabir", |
| "middle": [], |
| "last": "Manandhar", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "kmanandharsh@student.unimelb.edu.au" |
| }, |
| { |
| "first": "Shrestha", |
| "middle": [], |
| "last": "Rohit", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Kumar", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Saket", |
| "middle": [], |
| "last": "Khandelwal", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "saketk@student.unimelb.edu.au" |
| }, |
| { |
| "first": "Jeannie", |
| "middle": [ |
| "Marie" |
| ], |
| "last": "Paterson", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "jeanniep@unimelb.edu.au" |
| }, |
| { |
| "first": "Timothy", |
| "middle": [], |
| "last": "Baldwin", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Melbourne", |
| "location": { |
| "country": "Australia" |
| } |
| }, |
| "email": "tbaldwin@unimelb.edu.au" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Beck", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Melbourne", |
| "location": { |
| "country": "Australia" |
| } |
| }, |
| "email": "d.beck@unimelb.edu.au" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Jowett", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "'Common Law' judicial systems follow the doctrine of precedent, which means the legal principles articulated in court judgements are binding in subsequent cases in lower courts. For this reason, lawyers must search prior judgements for the legal principles that are relevant to their case. The difficulty for those within the legal profession is that the information that they are looking for may be contained within a few paragraphs or sentences, but those few paragraphs may be buried within a hundred-page document. In this study, we create a schema based on the relevant information that legal professionals seek within judgements and perform text classification based on it, with the aim of not only assisting lawyers in researching cases, but eventually enabling large-scale analysis of legal judgements to find trends in court outcomes over time.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "'Common Law' judicial systems follow the doctrine of precedent, which means the legal principles articulated in court judgements are binding in subsequent cases in lower courts. For this reason, lawyers must search prior judgements for the legal principles that are relevant to their case. The difficulty for those within the legal profession is that the information that they are looking for may be contained within a few paragraphs or sentences, but those few paragraphs may be buried within a hundred-page document. In this study, we create a schema based on the relevant information that legal professionals seek within judgements and perform text classification based on it, with the aim of not only assisting lawyers in researching cases, but eventually enabling large-scale analysis of legal judgements to find trends in court outcomes over time.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The law is reason free from passion 1 -but you'll have to dig through hundreds of pages to find it.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In common law countries such as Australia, a core legal principle is the doctrine of precedent -every court judgement contains legal rulings that are binding upon subsequent cases in lower courts, though how legal rulings apply in subsequent cases is dependent on the facts of the case. When preparing to give a legal opinion or argue a case, lawyers spend many long hours reading lengthy judgements to identify therein the precedents that are salient to the case at hand. This time-consuming manual process has formed a barrier to large-scale analysis of legal judgements. Even though thousands of court judgements are published in Australia every year, 2 lawyers are only able to analyse small numbers of judgements, potentially missing broader trends hidden in the vast numbers of judgements that are published by the courts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "There is a growing body of research at the intersection of Law and Natural Language Processing, including prediction of court opinion about a case (Chalkidis et al., 2019a; Aletras et al., 2016) , classification of legal text by legal topics or issues (Soh et al., 2019; Chalkidis et al., 2019b) , and legal entity recognition (Cardellino et al., 2017) . However, our ultimate goal is to assist lawyers in identifying sections of judgements relevant to their case at hand, as well as bulk analysis of cases to identify relationships between factual patterns and decision outcomes. For this reason, we model our initial study on the sentence-by-sentence identification of argumentation zones within academic and scientific texts (Teufel et al., 2009; Guo et al., 2010) . However, these zoning papers do not account for the complex document structure of legal judgements, which have the potential to be structured as multiple sub-documents within the one court decision (see Section 3).", |
| "cite_spans": [ |
| { |
| "start": 147, |
| "end": 172, |
| "text": "(Chalkidis et al., 2019a;", |
| "ref_id": null |
| }, |
| { |
| "start": 173, |
| "end": 194, |
| "text": "Aletras et al., 2016)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 252, |
| "end": 270, |
| "text": "(Soh et al., 2019;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 271, |
| "end": 295, |
| "text": "Chalkidis et al., 2019b)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 327, |
| "end": 352, |
| "text": "(Cardellino et al., 2017)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 728, |
| "end": 749, |
| "text": "(Teufel et al., 2009;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 750, |
| "end": 767, |
| "text": "Guo et al., 2010)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The overall goal of the project is to automate the extraction of information from legal judgements, to assist lawyers to more easily and quickly identify the type of information that they are looking for from a large number of judgements. The project also aims to enable the large-scale analysis of judgements by legal researchers in order to identify trends or patterns that may be occurring within judgments, for example identifying patterns of facts that lead to particular results. This kind of analysis is relevant in predicting the outcome of complex cases and may also inform law reform. This part of the study reports on the initial phase of experimenting with the granularity of the annotation labels in developing our schema, as well as our initial experiments in automatically identifying these labels.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Legal research as a broad term can include any form of research that is undertaken for the purpose of advancing legal advice, litigation or law reform, and can include research activities such as community surveys, comparative studies of legislation and the study of court judgments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "This project focuses solely on the activity of studying court judgments, as it is a crucial component of legal research in common law countries. For lawyers and legal researchers, court judgments are a key source of data for the purpose of legal research, though legal research in general can encompass other sources of data, such as legislation, international treaties, government reports etc.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "When lawyers or legal researchers read a court judgment, what they are looking for is observations, opinions or decisions that the judge has made about how the law should be interpreted and applied in the particular context of the case before it. For example, what are the rules to resolve conflict between competing values, or what are the rules for resolving ambiguities of the meaning of a word in legislation? These observations, opinions and decisions by judges can be conceptualised as \"law data\" -data that legal researchers collect in order to understand how laws are being applied by courts to specific factual patterns and to predict how it may be applied in future scenarios.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Collecting data about how laws are interpreted is important at both the individual and the societal level. At the individual level, much of a lawyer's work is advising clients on what they need to do to comply with the law. Lawyers will research past court judgments to collect data about how the law has been interpreted in similar factual situations, in order to make an informed opinion about how the law is likely to be applied to the case at hand. At the societal level, legal researchers in academia, regulatory agencies and government collect data on how laws are being interpreted and applied to specific facts, in order to assess whether laws are delivering the desired social outcomes.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The field of legal research has conventionally relied mostly on qualitative data, and if there is quantitative data it is usually at a small scale. The reason for this is because \"law data\" is expressed in court judgments that are generally very long and complex free-form text. The only method for collecting \"law data\" has been through the manual reading of legal judgments by people with legal expertise. This is a very time-consuming process and therefore legal research has generally had to rely on small quantities of data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The contribution that NLP can make to the legal field is to enable the automatic extraction of \"law data\" from court judgements, to increase the number of court judgments that legal researchers can analyse. The challenge for this project has been the novelty of the task of extracting complex data from court judgments. There is no established schema for extracting information from court judgments. The schema proposed in this study is the result of a multi-disciplinary approach to merging the categories of data that are useful to legal researchers and lawyers, with the categories of information that can be accurately labelled using text classification.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We developed our initial proof-of-concept corpus from court judgements from the High Court of Australia, 3 which is the highest court in the Australian judicial system hierarchy. A court case may be decided by a single judge or a group of judges. In the case of a single judge, the court judgement is single-authored with one voice. When there are multiple judges, they can write a single judgement as a group, particularly if they are in agreement, or they can give separate reasons. In the latter case, the court judgement will then consist of multiple sets of reasons, structured as sub-components from the different judges, which together make up the entire judgement for that court case. To legal domain experts, there are general patterns or sequences by which different types of information tend to appear within a judgement. However there is a high degree of variation between court judgements according to the writing style of the judge. For instance, one common document pattern begins with the explanation of the facts of the case, followed by the reasoning on how the rele-LABEL DESCRIPTION", |
| "cite_spans": [ |
| { |
| "start": 105, |
| "end": 106, |
| "text": "3", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus Development", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Specific facts of that case, e.g. The applicant entered Australia as an unauthorised maritime arrival on 5 September 2011.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "FACT", |
| "sec_num": null |
| }, |
| { |
| "text": "Legal principles considered, e.g. The question that arises is whether the Tribunal failed to consider that the applicant faced a real probability of irreparable harm. CONCLUSION Outcome of the case, e.g. The Tribunal committed a jurisdictional error, the appeal should be allowed. vant legal principles were applied, and then ending with their conclusion. But this is not always the case. Some judges will state their conclusions at the beginning, and then provide a detailed examination of the facts and legal reasoning. Where there are multiple sets of reasons within a single judgement, each set of reasons will have its own structure particular to that judge's writing style. We limit our corpus to immigration law cases, and randomly selected 55 of these High Court judgements. These 55 documents contain over 9.5K sentences in total. Each of them was annotated at the sentence level with either FACT, REA-SONING or CONCLUSION, which capture different aspects of the case as shown in Figure 1 . In this initial corpus, REASONING made up half of the labelled sentences. Of the remaining sentences, three quarters were labelled FACT, and one quarter CONCLUSION. The FACT and CONCLUSION segments of the case are usually what lawyers are most interested in. These portions of the document (judgement) contain unique details pertaining to the case, while the REASONING category is a combination of original insights of this case and a recapitulation of previous relevant judgements.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 989, |
| "end": 997, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "REASONING", |
| "sec_num": null |
| }, |
| { |
| "text": "Annotation For the annotations, we had 1 primary annotator (ANNOTATOR A), a qualified lawyer and legal researcher, who marked up all of the sampled High Court judgements. ANNO-TATOR A had a label distribution of FACT: 38%, REASONING: 50%, and CONCLUSION: 12%. We also had 2 secondary annotators (ANNOTATORS B and C): the first is a practising immigration lawyer, and the second has some legal training, but is not a fully qualified lawyer. We randomly selected 3 documents (judgements) for the secondary annotators to mark up. This made up 5% of the number of sentences of the whole corpus. Of those sentences, there were no three-way disagreements between the annotators. The Cohen's kappa (\u03ba) between all three annotators shows very good 2-way agreement between all pairs of annotators. The inter-annotator agreement between A-B and B-C were 0.70, and between A-C was 0.73. A large majority of the 2-way disagreements involved REASONING, with 81.5% of the disagreements being REASONING-vs-FACT and REASONING-vs-CONCLUSION, split roughly 50:50.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "REASONING", |
| "sec_num": null |
| }, |
| { |
| "text": "In order to assess the feasibility of using our corpus in a supervised setting, we perform experiments using a range of different models for sentence-level classification. The goal is to have a reasonable understanding of how difficult the task is, both in terms of our initial schema and training data size. Data Processing Although the task is modelled at the sentence level, the corpus was split at the document-level for training, validation, and testing. This set-up emulates the real-world setting, where new documents are classified as a whole. We use an 80%:10%:10% split for training, development and testing (corresponding to 44:5:6 documents and 3000:1200:800 sentences, respectively). Since there is a smaller number of CONCLUSION sentences in court judgements, we perform undersampling over the training data only, by randomly deleting samples from the other majority classes to balance the number of training instances across the three labels. Note that this was performed for the training set only, and the development and testing sets were left untouched.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Methods As two baselines, we use: (1) a majority-class classifier, based on the training data; and (2) the NBSVM model proposed by Wang and Manning (2012) , which combines a naive Bayes model with a support vector machine, using a bagof-words text representation. We compare this with a set of pre-trained language models, namely BERT (Devlin et al., 2019) , RoBERTa (Liu et al., 2019) and XLNet (Yang et al., 2019) . We employ similar structures for these models: 12 layers of transformer blocks, a hidden layer size of 768d, and 12 attention heads. All models are trained by adding a single hidden layer with softmax output.", |
| "cite_spans": [ |
| { |
| "start": 131, |
| "end": 154, |
| "text": "Wang and Manning (2012)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 335, |
| "end": 356, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 367, |
| "end": 385, |
| "text": "(Liu et al., 2019)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 396, |
| "end": 415, |
| "text": "(Yang et al., 2019)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Initial Results We evaluate our models using Precision, Recall, and Macro-averaged and Microaveraged F1, showing the results in Table 1 . The NBSVM model outperforms the majority class baseline by 0.30 in Macro F1. Using a pre-trained model further improves the performance, with XL-Net increasing Macro F1 by 0.11 over the NBSVM baseline, and achieving the best results. While this is expected, since these models have been pretrained over large amounts of textual data, it is still remarkable given how domain-specific court judgements are.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 128, |
| "end": 135, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "5", |
| "sec_num": null |
| }, |
| { |
| "text": "Incorporating Context While our initial results are promising, at 0.66 Macro F1 they still result in many errors. This undermines the potential of our approach to be deployed in real-world scenarios.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "5", |
| "sec_num": null |
| }, |
| { |
| "text": "In the remaining experiments, we explore a few approaches to improve performance, focusing on XLNet since it was our best model in the initial experiments. One hypothesis is that the label of a sentence is affected by its context in the document. This is directly reflected in the annotation procedure, since annotators have access to the full document when labelling sentences. In order to test this hypothesis, we prepend each sentence with its two previous 5 We refer the reader to the original papers from each model for details of the architecture and model pre-training. Table 3 : Results for XLNet context with Sentential Context but without Undersampling sentences in the document, and feed the sequence of three sentences into XLNet as input.", |
| "cite_spans": [ |
| { |
| "start": 460, |
| "end": 461, |
| "text": "5", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 577, |
| "end": 584, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "5", |
| "sec_num": null |
| }, |
| { |
| "text": "We show the results of this approach in Table 2 , comparing with the XLNet model used in the initial experiments without sentential context. We also break down the results across the three individual classes, to get a better understanding of any differences in performance. Overall, adding context greatly improves the performance in detecting FACT and CONCLUSION sentences, reaching an overall Macro F1 of 0.76 and Micro F1 of 0.79, a 0.10 and 0.07 improvement over the single sentence model, respectively. Interestingly, adding context does not seem to affect REASONING sentences much, with a small decrease in Recall. This could be evidence that REASONING sentences can be detected only by local content within the sentence, without necessarily requiring extra-sentential context.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 40, |
| "end": 47, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "5", |
| "sec_num": null |
| }, |
| { |
| "text": "We also investigated the impact of undersampling the training data. Our motivation for undersampling is the unbalanced nature of the dataset, where around half of the sentences are labelled as REASONING. This is an issue since, as explained in Section 3, legal experts are mostly interested in FACT and CONCLUSION sentences.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effect of Undersampling", |
| "sec_num": null |
| }, |
| { |
| "text": "In Table 3 we present the results for XLNet context without undersampling, to compare against the original results in (the bottom half of) Table 2 with undersampling. The results show a drop in recall for CONCLUSION, which was expected, while improving the recall for REASONING. FACT, however, was largely unaffected. Note that recall is particularly critical in our use case, in highlighting potential FACT and CONCLUSION sentences to our legal expert.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 10, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 139, |
| "end": 146, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effect of Undersampling", |
| "sec_num": null |
| }, |
| { |
| "text": "In this paper, we have presented the preliminary investigations of our interdisciplinary collaboration. The main focus was to scope out the areas in which NLP can assist in the task of interpreting legal judgments -a task that every lawyer must do in researching a case. The main contribution of this paper is developing and testing the annotation schema. In future work, we aim to extract trends over time for a given aspect of the annotation, e.g. how the presentation of REASONING changes over time as new cases are judged with each new CONCLUSION. Given that Australia has a common law system, these judgements in effect shape the interpretation and understanding of the law and set a precedent for subsequent cases.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion and Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The results of the sentence-level text classification are promising despite the inherent confusability within the REASONING class: even professional lawyers with years of training can disagree in ascertaining whether a sentence is indeed a REASONING rather than a CONCLUSION or in some cases a REASONING or a FACT sentence, as there can be elements of either within a REASONING sentence. Although the results do show promise, in future work, we intend to experiment with the annotation schema to explore more detailed sub-categories under REASONING. This will assist us in identifying more targeted zones within the judgements, which may better assist in legal information extraction tasks, and in better characterising the structure of these legal documents.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion and Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "From an application perspective, we plan to test the newly released LegalBERT (Chalkidis et al., 2020) and compare this to our adaptation of a domain-specific BERT and XLNet for legal texts. We note that LegalBERT was pre-trained on a variety of legal texts that are different from the legal texts in our database, which consisted solely of Australian court judgments. The data used to pretrain LegalBERT included legislation and contracts, which are different to court judgments in terms of structure and content. Also, the data used to pretrain LegalBERT was from multiple legal jurisdictions, being the United States, United Kingdom and Europe, with each jurisdiction having unique nuances to the language used in its legal texts. Given these differences between our data and the training data of LegalBERT, it remains an open question as to whether LegalBERT would have any advantage over BERT, and whether a custom-tuned BERT for our purposes may be more advantageous.", |
| "cite_spans": [ |
| { |
| "start": 78, |
| "end": 102, |
| "text": "(Chalkidis et al., 2020)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion and Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "For example, the Federal Court of Australia alone publishes around 1700-2500 judgements per year.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://www.hcourt.gov.au/ publications/judgements", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We note that the dataset will not be made publicly available because the project team does not have the right to publish this data. Whilst court judgments are in the public domain, there are copyright restrictions on republication. Republication of court judgments in an altered form, which our labelled dataset would be, is not allowed.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work was supported in part by the Melbourne Data Analytics Platform. We are grateful for the discussions and suggestions from Priyanka Pillai, Emily Fitzgerald, Daniel Russo-Batterham, Kim Doyle, and Andrew Turpin in the shaping of this project. The authors would also like to thank the reviewers for their constructive feedback.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Predicting judicial decisions of the European Court of Human Rights: A natural language processing perspective", |
| "authors": [ |
| { |
| "first": "Nikolaos", |
| "middle": [], |
| "last": "Aletras", |
| "suffix": "" |
| }, |
| { |
| "first": "Dimitrios", |
| "middle": [], |
| "last": "Tsarapatsanis", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Preo\u0163iuc-Pietro", |
| "suffix": "" |
| }, |
| { |
| "first": "Vasileios", |
| "middle": [], |
| "last": "Lampos", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "PeerJ Computer Science", |
| "volume": "2", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikolaos Aletras, Dimitrios Tsarapatsanis, Daniel Preo\u0163iuc-Pietro, and Vasileios Lampos. 2016. Pre- dicting judicial decisions of the European Court of Human Rights: A natural language processing per- spective. PeerJ Computer Science, 2:e93.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Legal NERC with ontologies, Wikipedia and curriculum learning", |
| "authors": [ |
| { |
| "first": "Cristian", |
| "middle": [], |
| "last": "Cardellino", |
| "suffix": "" |
| }, |
| { |
| "first": "Milagro", |
| "middle": [], |
| "last": "Teruel", |
| "suffix": "" |
| }, |
| { |
| "first": "Laura", |
| "middle": [ |
| "Alonso" |
| ], |
| "last": "Alemany", |
| "suffix": "" |
| }, |
| { |
| "first": "Serena", |
| "middle": [], |
| "last": "Villata", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "254--259", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cristian Cardellino, Milagro Teruel, Laura Alonso Ale- many, and Serena Villata. 2017. Legal NERC with ontologies, Wikipedia and curriculum learning. In Proceedings of the 15th Conference of the European Chapter of the Association for Computational Lin- guistics: Volume 2, Short Papers, pages 254-259, Valencia, Spain. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Neural legal judgment prediction in English", |
| "authors": [], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "4317--4323", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1424" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilias Chalkidis, Ion Androutsopoulos, and Nikolaos Aletras. 2019a. Neural legal judgment prediction in English. In Proceedings of the 57th Annual Meet- ing of the Association for Computational Linguis- tics, pages 4317-4323, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Extreme multi-label legal text classification: A case study in EU legislation", |
| "authors": [ |
| { |
| "first": "Ilias", |
| "middle": [], |
| "last": "Chalkidis", |
| "suffix": "" |
| }, |
| { |
| "first": "Emmanouil", |
| "middle": [], |
| "last": "Fergadiotis", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Natural Legal Language Processing Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "78--87", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-2209" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilias Chalkidis, Emmanouil Fergadiotis, Prodromos Malakasiotis, Nikolaos Aletras, and Ion Androut- sopoulos. 2019b. Extreme multi-label legal text clas- sification: A case study in EU legislation. In Pro- ceedings of the Natural Legal Language Process- ing Workshop 2019, pages 78-87, Minneapolis, Min- nesota. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "LEGAL-BERT: The muppets straight out of law school",
| "authors": [ |
| { |
| "first": "Ilias", |
| "middle": [], |
| "last": "Chalkidis", |
| "suffix": "" |
| }, |
| { |
| "first": "Manos", |
| "middle": [], |
| "last": "Fergadiotis", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
| "volume": "", |
| "issue": "", |
| "pages": "2898--2904", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilias Chalkidis, Manos Fergadiotis, Prodromos Malakasiotis, Nikolaos Aletras, and Ion Androutsopoulos. 2020. LEGAL-BERT: The muppets straight out of law school. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 2898-2904, Online. Association for Computational Linguistics.",
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.",
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Identifying the information structure of scientific abstracts: An investigation of three different schemes", |
| "authors": [ |
| { |
| "first": "Yufan", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Liakata", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilona", |
| "middle": [], |
| "last": "Silins", |
| "suffix": "" |
| }, |
| { |
| "first": "Lin", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Ulla", |
| "middle": [], |
| "last": "Stenius", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 2010 Workshop on Biomedical Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "99--107", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yufan Guo, Anna Korhonen, Maria Liakata, Ilona Silins, Lin Sun, and Ulla Stenius. 2010. Identifying the information structure of scientific abstracts: An investigation of three different schemes. In Proceedings of the 2010 Workshop on Biomedical Natural Language Processing, pages 99-107, Uppsala, Sweden. Association for Computational Linguistics.",
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "RoBERTa: A robustly optimized BERT pretraining approach", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingfei", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Y. Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, M. Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A robustly optimized BERT pretraining approach. ArXiv, abs/1907.11692.",
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Legal area classification: A comparative study of text classifiers on Singapore Supreme Court judgments", |
| "authors": [ |
| {
| "first": "Jerrold",
| "middle": [],
| "last": "Soh",
| "suffix": ""
| },
| {
| "first": "How",
| "middle": [
| "Khang"
| ],
| "last": "Lim",
| "suffix": ""
| },
| {
| "first": "Ian",
| "middle": [
| "Ernst"
| ],
| "last": "Chai",
| "suffix": ""
| }
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Natural Legal Language Processing Workshop 2019", |
| "volume": "", |
| "issue": "", |
| "pages": "67--77", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-2208" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jerrold Soh, How Khang Lim, and Ian Ernst Chai. 2019. Legal area classification: A comparative study of text classifiers on Singapore Supreme Court judgments. In Proceedings of the Natural Legal Language Processing Workshop 2019, pages 67-77, Minneapolis, Minnesota. Association for Computational Linguistics.",
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Towards domain-independent argumentative zoning: Evidence from chemistry and computational linguistics", |
| "authors": [ |
| { |
| "first": "Simone", |
| "middle": [], |
| "last": "Teufel", |
| "suffix": "" |
| }, |
| { |
| "first": "Advaith", |
| "middle": [], |
| "last": "Siddharthan", |
| "suffix": "" |
| }, |
| { |
| "first": "Colin", |
| "middle": [], |
| "last": "Batchelor", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1493--1502", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Simone Teufel, Advaith Siddharthan, and Colin Batchelor. 2009. Towards domain-independent argumentative zoning: Evidence from chemistry and computational linguistics. In Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing, pages 1493-1502, Singapore. Association for Computational Linguistics.",
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Baselines and bigrams: Simple, good sentiment and topic classification", |
| "authors": [ |
| { |
| "first": "Sida", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "90--94", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sida Wang and Christopher Manning. 2012. Baselines and bigrams: Simple, good sentiment and topic classification. In Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 90-94, Jeju Island, Korea. Association for Computational Linguistics.",
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "XLNet: Generalized autoregressive pretraining for language understanding", |
| "authors": [ |
| { |
| "first": "Zhilin", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zihang", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaime", |
| "middle": [], |
| "last": "Carbonell", |
| "suffix": "" |
| }, |
| {
| "first": "Russ",
| "middle": [
| "R"
| ],
| "last": "Salakhutdinov",
| "suffix": ""
| },
| {
| "first": "Quoc",
| "middle": [
| "V"
| ],
| "last": "Le",
| "suffix": ""
| }
| ], |
| "year": 2019, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "32", |
| "issue": "", |
| "pages": "5753--5763", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Russ R Salakhutdinov, and Quoc V Le. 2019. XLNet: Generalized autoregressive pretraining for language understanding. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alch\u00e9-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems 32, pages 5753-5763. Curran Associates, Inc.",
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "text": "Description of the Label Set", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td colspan=\"4\">: Results for XLNet without & with Sentential</td></tr><tr><td colspan=\"4\">Context (Prepending the Previous Two Sentences)</td></tr><tr><td>Model</td><td>Class</td><td>P</td><td>R F1</td></tr><tr><td/><td colspan=\"3\">CONCLUSION .71 .57 .63</td></tr><tr><td>XLNet context</td><td>FACT</td><td colspan=\"2\">.85 .85 .85</td></tr><tr><td/><td>REASONING</td><td colspan=\"2\">.83 .87 .85</td></tr></table>", |
| "html": null, |
| "text": "" |
| } |
| } |
| } |
| } |