# OpinionBench / OpinionBench.py
# (Renamed from LongConL.py to OpinionBench.py — commit 15c7732, verified.)
# NOTE(review): the lines above/below were scraped Hugging Face file-viewer
# chrome ("raw / history blame / 5.37 kB"); kept here as a comment so the
# module parses.
import datasets
import pandas as pd
# Dataset metadata
# NOTE(review): citation, description, homepage, and license are all left
# empty here — fill these in before publishing the dataset card.
_CITATION = """"""
_DESCRIPTION = """"""
_HOMEPAGE = ""
_LICENSE = ""
# Updated URLs to dynamically handle task names
# Split name -> URL template. The "{task_name}" placeholder is filled in by
# _split_generators() with the active builder-config name (note the "_val"
# vs "validation" naming difference on the remote files).
_URLS = {
"train": "https://huggingface.co/datasets/reglab/OpinionBench/resolve/main/data/automated-tasks-subsample/{task_name}/{task_name}_train.csv",
"validation": "https://huggingface.co/datasets/reglab/OpinionBench/resolve/main/data/automated-tasks-subsample/{task_name}/{task_name}_val.csv",
"test": "https://huggingface.co/datasets/reglab/OpinionBench/resolve/main/data/automated-tasks-subsample/{task_name}/{task_name}_test.csv",
}
# Builder-config names: one entry per classification task. Each name is
# substituted into the _URLS templates to locate that task's CSV files, and
# one datasets.BuilderConfig is generated per name (see BUILDER_CONFIGS).
# Prefixes group tasks by source corpus (ATS, Chevron, CoA, DC, JRC, SSC, SC).
TASK_NAMES = [
"ATS-Jurisdiction", "ATS-FavorableJudgment", "Chevron-Agency", "Chevron-ChevCited",
"Chevron-Dec.Ov.", "Chevron-Deference", "Chevron-Outcome", "Chevron-Subject",
"CoA-casetyp1", "CoA-direct1", "CoA-geniss", "CoA-typeiss",
"DC-casetype", "DC-category", "DC-libcon",
"JRC-AREA1", "JRC-CERT", "JRC-REVERSD",
"SSC-ca_disp", "SSC-ca_uscty", "SSC-death_c", "SSC-p1_persn",
"SC-petitioner", "SC-respondent", "SC-respondentState", "SC-jurisdiction",
"SC-adminAction", "SC-threeJudgeFdc", "SC-caseOrigin", "SC-caseOriginState",
"SC-caseSource", "SC-caseSourceState", "SC-lcDisagreement",
"SC-certReason", "SC-lcDisposition", "SC-lcDispositionDirection",
"SC-issue", "SC-issueArea", "SC-decisionDirection", "SC-decisionDirectionDissent",
"SC-authorityDecision1", "SC-lawType", "SC-decisionType", "SC-declarationUncon", "SC-caseDisposition",
"SC-partyWinning", "SC-precedentAlteration", "SC-voteUnclear", "SC-majOpinWriter", "SC-majVotes", "SC-minVotes"
]
def _task_config(name):
    """Build the per-task metadata dict (description + feature schema).

    Each task gets its own freshly-constructed feature mapping so no
    datasets.Value instances are shared between configs.
    """
    return {
        "description": f"{name} specific legal opinions",  # Dynamic description based on task name
        "features": {
            "idx": datasets.Value("string"),
            "Citation": datasets.Value("string"),
            "Full Case Name": datasets.Value("string"),
            "Opinion Text": datasets.Value("string"),
            "Numerical Label": datasets.Value("string"),  # Will be optional for some tasks
            # Optional columns present in some task files but currently
            # disabled: "Text Label", "DC Numerical Label", "Syllabus".
        },
    }


# Task name -> metadata dict, one entry per entry in TASK_NAMES.
_CONFIGS = {name: _task_config(name) for name in TASK_NAMES}
class LongConLDataset(datasets.GeneratorBasedBuilder):
    """Legal-opinion classification dataset (OpinionBench; legacy name LongConL).

    One builder config exists per task in ``TASK_NAMES``; the config name is
    substituted into the ``_URLS`` templates to locate that task's
    train/validation/test CSV files on the Hugging Face Hub.
    """

    # One BuilderConfig per task so callers can select a task via
    # load_dataset("reglab/OpinionBench", task_name).
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=task_name, version=datasets.Version("1.0.0"), description=task_name)
        for task_name in TASK_NAMES
    ]

    def _info(self):
        """Return dataset metadata; all tasks share the same flat string schema."""
        features = datasets.Features({
            "idx": datasets.Value("string"),
            "Citation": datasets.Value("string"),
            "Full Case Name": datasets.Value("string"),
            "Opinion Text": datasets.Value("string"),
            "Numerical Label": datasets.Value("string"),  # Will be optional for some tasks
            # Optional columns present in some task files but currently
            # disabled: "Text Label", "DC Numerical Label", "Syllabus".
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download this config's three CSV splits and hand them to the generator."""
        task_name = self.config.name
        # Fill the {task_name} placeholder in each split's URL template.
        urls = {key: val.format(task_name=task_name) for key, val in _URLS.items()}
        downloaded_files = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"file_path": downloaded_files["validation"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"file_path": downloaded_files["test"]},
            ),
        ]

    def _generate_examples(self, file_path):
        """Yield ``(key, example)`` pairs from one split's CSV file.

        Keys are the row's position in the file. ``Numerical Label`` is read
        with ``.get()`` because some task files lack that column; the other
        four columns are required and raise ``KeyError`` if absent.
        """
        data = pd.read_csv(file_path)
        for id_, row in enumerate(data.to_dict(orient="records")):
            yield id_, {
                "idx": row["idx"],
                "Citation": row["Citation"],
                "Full Case Name": row["Full Case Name"],
                "Opinion Text": row["Opinion Text"],
                "Numerical Label": row.get("Numerical Label", None),
            }