# (removed non-Python scrape artifacts: file-size line, git-blame commit
#  hashes, and a line-number gutter that preceded the actual source)
import datasets
import pandas as pd
# Dataset metadata placeholders, passed to datasets.DatasetInfo in
# LongConLDataset._info().  All four are currently empty — fill them in
# before publishing the dataset to the Hub.
_CITATION = """"""
_DESCRIPTION = """"""
_HOMEPAGE = ""
_LICENSE = ""
# NOTE(review): superseded by the per-task URLs built dynamically in
# _split_generators.  Mind the discrepancy: this dead version names the
# validation file "*_validation.csv", while the live code uses "*_val.csv".
#_URLS = {
#"train": f"data/automated-tasks-subsample/{dataset_prefix}-subsample/{task_name}/{task_name}_train.csv",
#"validation": f"data/automated-tasks-subsample/{dataset_prefix}-subsample/{task_name}/{task_name}_validation.csv",
#"test": f"data/automated-tasks-subsample/{dataset_prefix}-subsample/{task_name}/{task_name}_test.csv",
#}
# All supported builder-config names, one per labelling task.  The prefix
# before the first '-' (e.g. 'ATS', 'Chevron', 'CoA', 'DC', 'JRC', 'SC',
# 'SSC') selects the data subdirectory in _split_generators; the remainder
# names the individual task.
# NOTE(review): some Chevron entries carry trailing spaces
# ('Chevron-Dec. Ag. ', 'Chevron-Dec. Ov. ', 'Chevron-Type Del. ') and
# 'JRC-ATT GEN' / 'JRC-GVT PRTY' contain spaces — these strings are
# interpolated directly into CSV paths, so the on-disk directory and file
# names must match byte-for-byte; confirm against the data layout.
TASK_NAMES = [
'ATS-ATS Judgment Categories','ATS-Did the Court find ATS Jurisdiction',
'ATS-Favorable Judgment for at least one Plaintiff affecting the ATS claim',
'ATS-Is at least one defendant a corporation',
'Chevron-Ag. Format', 'Chevron-Ag. Interp', 'Chevron-Ag. Iss 3', 'Chevron-Ag. Iss. 1',
'Chevron-Ag. Iss. 2', 'Chevron-Ag. Iss. 4', 'Chevron-Agency', 'Chevron-ChevCited', 'Chevron-Chevron',
'Chevron-Chief', 'Chevron-Concurr', 'Chevron-Dec. Ag. ', 'Chevron-Dec. Ov. ', 'Chevron-Deference', 'Chevron-Dissents',
'Chevron-Notice & Comment', 'Chevron-Outcome', 'Chevron-Step 0', 'Chevron-Step 1', 'Chevron-Step 2', 'Chevron-Subject',
'Chevron-Type Del. ', 'Chevron-Unan',
'CoA-abusedis', 'CoA-adminrev', 'CoA-alj', 'CoA-altdisp', 'CoA-amicus', 'CoA-appbus', 'CoA-appfed', 'CoA-appfiduc', 'CoA-applfrom',
'CoA-appnatpr', 'CoA-appnonp', 'CoA-appstate', 'CoA-appsubst', 'CoA-attyfee', 'CoA-bank_ap1', 'CoA-bank_r1', 'CoA-capric',
'CoA-casetyp1', 'CoA-circuit', 'CoA-classact', 'CoA-concur', 'CoA-confess', 'CoA-constit', 'CoA-counsel', 'CoA-counsel1',
'CoA-counsel2', 'CoA-crossapp', 'CoA-day', 'CoA-direct1', 'CoA-discover', 'CoA-dissent', 'CoA-district', 'CoA-dueproc',
'CoA-entrap', 'CoA-erron', 'CoA-execord', 'CoA-exhaust', 'CoA-fedlaw', 'CoA-fedvst', 'CoA-genapel1', 'CoA-geniss', 'CoA-genresp1',
'CoA-genstand', 'CoA-habeas', 'CoA-immunity', 'CoA-improper', 'CoA-indict', 'CoA-initiate', 'CoA-injunct', 'CoA-interven',
'CoA-judgdisc', 'CoA-juris', 'CoA-juryinst', 'CoA-majvotes', 'CoA-method', 'CoA-month', 'CoA-notice', 'CoA-numappel', 'CoA-numresp',
'CoA-opinstat', 'CoA-origin', 'CoA-othadmis', 'CoA-othjury', 'CoA-plea', 'CoA-post_trl', 'CoA-prejud', 'CoA-pretrial', 'CoA-procdis',
'CoA-procedur', 'CoA-r_bus', 'CoA-r_fed', 'CoA-r_fiduc', 'CoA-r_natpr', 'CoA-r_nonp', 'CoA-r_state', 'CoA-r_stid', 'CoA-r_subst',
'CoA-realapp', 'CoA-realresp', 'CoA-rtcouns', 'CoA-search', 'CoA-sentence', 'CoA-source', 'CoA-standing', 'CoA-state', 'CoA-statecl',
'CoA-stpolicy', 'CoA-subevid', 'CoA-suffic', 'CoA-summary', 'CoA-timely', 'CoA-treat', 'CoA-trialpro', 'CoA-typeiss', 'CoA-weightev',
'DC-casetype', 'DC-category', 'DC-circuit', 'DC-libcon', 'DC-month', 'DC-statdist', 'DC-state', 'DC-year',
'JRC-AREA1', 'JRC-ATT GEN', 'JRC-CERT', 'JRC-DECISION', 'JRC-DECISION2', 'JRC-DISSENT', 'JRC-GVT PRTY', 'JRC-REVERSD',
'SC-adminAction', 'SC-authorityDecision1', 'SC-caseDisposition', 'SC-caseOrigin', 'SC-caseOriginState', 'SC-caseSource',
'SC-caseSourceState', 'SC-certReason', 'SC-decisionDirection', 'SC-decisionDirectionDissent', 'SC-decisionType',
'SC-declarationUncon', 'SC-issue', 'SC-issueArea', 'SC-jurisdiction', 'SC-lawType', 'SC-lcDisagreement', 'SC-lcDisposition',
'SC-lcDispositionDirection', 'SC-majOpinWriter', 'SC-majVotes', 'SC-minVotes', 'SC-partyWinning', 'SC-petitioner',
'SC-precedentAlteration', 'SC-respondent', 'SC-respondentState', 'SC-threeJudgeFdc', 'SC-voteUnclear',
'SSC-agency', 'SSC-agency_r', 'SSC-amicus', 'SSC-arson', 'SSC-assaulta', 'SSC-burglary', 'SSC-ca_atty', 'SSC-ca_capo',
'SSC-ca_conv', 'SSC-ca_cruel', 'SSC-ca_disc', 'SSC-ca_disp', 'SSC-ca_doubj', 'SSC-ca_ev_m', 'SSC-ca_ev_w', 'SSC-ca_gjury',
'SSC-ca_insan', 'SSC-ca_jr_in', 'SSC-ca_jr_sl', 'SSC-ca_majfm', 'SSC-ca_opnfm', 'SSC-ca_plea', 'SSC-ca_prej', 'SSC-ca_race',
'SSC-ca_recus', 'SSC-ca_self', 'SSC-ca_sent', 'SSC-ca_sento', 'SSC-ca_serch', 'SSC-ca_sevr', 'SSC-ca_speed', 'SSC-ca_stc',
'SSC-ca_stcty', 'SSC-ca_stdc', 'SSC-ca_suff', 'SSC-ca_tot_c', 'SSC-ca_tot_i', 'SSC-ca_trial', 'SSC-ca_usc', 'SSC-ca_uscdc',
'SSC-ca_uscty', 'SSC-ca_venue', 'SSC-ca_winp', 'SSC-crossapp', 'SSC-death_c', 'SSC-death_im', 'SSC-dec1_day', 'SSC-dec1_mo',
'SSC-dec1_yr', 'SSC-decs_day', 'SSC-decs_mo', 'SSC-decs_yr', 'SSC-disorder', 'SSC-docket_n', 'SSC-drugabus',
'SSC-drugsell', 'SSC-dui', 'SSC-enbanc', 'SSC-fam_kids', 'SSC-first_ct', 'SSC-fraud', 'SSC-juris', 'SSC-kidnap', 'SSC-mans_neg',
'SSC-mans_non', 'SSC-multi_p', 'SSC-multi_r', 'SSC-murder', 'SSC-o_defend', 'SSC-o_plain', 'SSC-p1_persn', 'SSC-p1_sgov',
'SSC-parole', 'SSC-r1_persn', 'SSC-rape', 'SSC-rev_ct', 'SSC-robbery', 'SSC-sex_gen', 'SSC-stolen', 'SSC-theft', 'SSC-traffic',
'SSC-type_p1', 'SSC-type_p2'
]
# Per-task config table: maps each task name to a description and the shared
# string-typed feature schema.  Every column is a plain string; label columns
# may be absent from some task CSVs.
# NOTE(review): not referenced by the builder class below — looks like it was
# superseded by the hard-coded schema in _info(); confirm before relying on it.
_STRING_COLUMNS = (
    "idx",
    "Citation",
    "Full Case Name",
    "Opinion Text",
    "Numerical Label",
    "Text Label",
)

_CONFIGS = {
    name: {
        "description": f"{name} specific legal opinions",
        "features": {col: datasets.Value("string") for col in _STRING_COLUMNS},
    }
    for name in TASK_NAMES
}
class LongConLDataset(datasets.GeneratorBasedBuilder):
    """Legal-opinion classification dataset builder for the LongConL tasks.

    One builder config exists per entry in TASK_NAMES; the config name picks
    which task's train/validation/test CSVs are loaded from
    ``data/automated-tasks-subsample/<prefix>-subsample/<task>/``.
    """

    # One BuilderConfig per task so `load_dataset(path, name=<task>)` works.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=task_name,
            version=datasets.Version("1.0.0"),
            description=task_name,
        )
        for task_name in TASK_NAMES
    ]

    def _info(self):
        """Return dataset metadata; the feature schema is shared by all tasks."""
        features = datasets.Features({
            "idx": datasets.Value("string"),
            "Citation": datasets.Value("string"),
            "Full Case Name": datasets.Value("string"),
            "Opinion Text": datasets.Value("string"),
            # Label columns are missing from some task CSVs, so both are
            # declared as nullable strings and filled with None when absent
            # (see _generate_examples).
            "Numerical Label": datasets.Value("string"),
            "Text Label": datasets.Value("string"),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Resolve the selected task's split CSVs and hand them to the loader.

        The prefix before the first '-' in the config name (e.g. 'SC', 'CoA',
        'SSC') selects the data subdirectory.
        """
        task_name = self.config.name
        dataset_prefix = task_name.split("-")[0]
        base = f"data/automated-tasks-subsample/{dataset_prefix}-subsample/{task_name}"
        urls = {
            "train": f"{base}/{task_name}_train.csv",
            "validation": f"{base}/{task_name}_val.csv",
            "test": f"{base}/{task_name}_test.csv",
        }
        downloaded_files = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"file_path": downloaded_files[key]},
            )
            for split_name, key in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "validation"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, file_path):
        """Yield ``(id, example)`` pairs from one split's CSV.

        Both label columns may be absent in some task files, so they are read
        with ``dict.get`` and default to None.  (Fix: the original used
        ``row["Text Label"]`` and raised KeyError on tasks without that
        column, despite the schema comment marking it optional.  Leftover
        debug ``print`` calls were also removed.)
        """
        data = pd.read_csv(file_path)
        for id_, row in enumerate(data.to_dict(orient="records")):
            yield id_, {
                "idx": row["idx"],
                "Citation": row["Citation"],
                "Full Case Name": row["Full Case Name"],
                "Opinion Text": row["Opinion Text"],
                "Numerical Label": row.get("Numerical Label", None),
                "Text Label": row.get("Text Label", None),
            }