Datasets:

License:
srzhang committed on
Commit
5879764
·
verified ·
1 Parent(s): 3d10dbe

Create LongConL.py

Browse files
Files changed (1) hide show
  1. LongConL.py +118 -0
LongConL.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import datasets
2
+ import pandas as pd
3
+
4
+ # Dataset metadata
5
+ _CITATION = """"""
6
+ _DESCRIPTION = """"""
7
+ _HOMEPAGE = ""
8
+ _LICENSE = ""
9
+
10
+ # Updated URLs to dynamically handle task names
11
+ _URLS = {
12
+ "train": "data/automated-tasks-subsample/{task_name}/{task_name}_subsample_train.csv",
13
+ "validation": "data/automated-tasks-subsample/{task_name}/{task_name}_subsample_val.csv",
14
+ "test": "data/automated-tasks-subsample/{task_name}/{task_name}_subsample_test.csv",
15
+ }
16
+
17
# Every supported legal-classification task; one builder config is created
# per entry (see LongConLDataset.BUILDER_CONFIGS).
TASK_NAMES = [
    "ATS-Jurisdiction",
    "ATS-FavorableJudgment",
    "Chevron-Agency",
    "Chevron-ChevCited",
    "Chevron-Dec.Ov.",
    "Chevron-Deference",
    "Chevron-Outcome",
    "Chevron-Subject",
    "CoA-casetyp1",
    "CoA-direct1",
    "CoA-geniss",
    "CoA-typeiss",
    "DC-casetype",
    "DC-category",
    "DC-libcon",
    "JRC-AREA1",
    "JRC-CERT",
    "JRC-REVERSD",
    "SC-decisionDirection",
    "SC-issueArea",
    "SC-partyWinning",
    "SC-petitioner",
    "SC-precedentAlteration",
    "SSC-ca_disp",
    "SSC-ca_uscty",
    "SSC-death_c",
    "SSC-p1_persn",
]
27
+
28
def _make_features():
    """Build a fresh copy of the feature schema shared by all tasks."""
    return {
        "idx": datasets.Value("string"),
        "Citation": datasets.Value("string"),
        "Full Case Name": datasets.Value("string"),
        "Opinion Text": datasets.Value("string"),
        "Numerical Label": datasets.Value("string"),  # optional for some tasks
        # "Text Label": datasets.Value("string"),     # optional for some tasks
        # "DC Numerical Label": datasets.Value("string")
        # "Syllabus": datasets.Value("string")        # optional for some tasks
    }


# Per-task metadata: a task-specific description plus the shared schema.
# NOTE(review): this mapping is not referenced by the builder class in this
# file (the schema is re-declared inside _info()); presumably kept for
# external consumers — verify before removing.
_CONFIGS = {
    name: {
        "description": f"{name} specific legal opinions",
        "features": _make_features(),
    }
    for name in TASK_NAMES
}
44
+
45
+
46
class LongConLDataset(datasets.GeneratorBasedBuilder):
    """Legal opinion classification dataset for LongConL tasks.

    One builder config exists per entry in ``TASK_NAMES``; the selected
    config (``self.config.name``) determines which task's train/validation/
    test CSV subsamples are downloaded and parsed.
    """

    # One config per task; the config *name* is the task name and selects
    # the CSV directory under data/automated-tasks-subsample/.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=task_name,
            version=datasets.Version("1.0.0"),
            description=task_name,
        )
        for task_name in TASK_NAMES
    ]

    def _info(self):
        """Return dataset metadata and the feature schema."""
        features = datasets.Features({
            "idx": datasets.Value("string"),
            "Citation": datasets.Value("string"),
            "Full Case Name": datasets.Value("string"),
            "Opinion Text": datasets.Value("string"),
            "Numerical Label": datasets.Value("string"),  # optional for some tasks
            # "Text Label": datasets.Value("string"),     # optional for some tasks
            # "DC Numerical Label": datasets.Value("string")
            # "Syllabus": datasets.Value("string")        # optional for some tasks
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download this task's CSVs and declare train/validation/test splits."""
        task_name = self.config.name
        # BUG FIX: the original ran task_name.replace("-", "-") — a no-op
        # commented as "replace hyphens with underscores". The effective
        # behavior always used the hyphenated name, so the dead call is
        # removed; the hosted paths use the hyphenated task name as-is.
        urls = {split: tmpl.format(task_name=task_name) for split, tmpl in _URLS.items()}
        downloaded_files = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"file_path": downloaded_files["validation"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"file_path": downloaded_files["test"]},
            ),
        ]

    def _generate_examples(self, file_path):
        """Yield (key, example) pairs from one split's CSV.

        ``Numerical Label`` is fetched with ``.get`` because some tasks'
        CSVs omit that column (yields None in that case).
        """
        # Debug print() calls removed: a loading script should not write to
        # stdout every time the dataset is built.
        data = pd.read_csv(file_path)
        for id_, row in enumerate(data.to_dict(orient="records")):
            yield id_, {
                "idx": row["idx"],
                "Citation": row["Citation"],
                "Full Case Name": row["Full Case Name"],
                "Opinion Text": row["Opinion Text"],
                "Numerical Label": row.get("Numerical Label", None),
                # "Text Label": row["Text Label"],
                # "DC Numerical Label": row["DC Numerical Label"]
                # "Syllabus": row["Syllabus"]
            }