servantez committed on
Commit
01691bd
·
1 Parent(s): cc5923b

Add dataset loader

Browse files
Files changed (1) hide show
  1. openexempt.py +165 -0
openexempt.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import datasets
3
+ from pathlib import Path
4
+
5
+
6
+ _DESCRIPTION = "OpenExempt is a diagnostic benchmark for legal reasoning in language models."
7
+ _HOMEPAGE = "https://github.com/servantez/OpenExempt"
8
+ _LICENSE = "CC BY 4.0"
9
+ _VERSION = datasets.Version("1.0.0")
10
+ _CITATION = """"""
11
+
12
+ _SUITES = {
13
+ "advanced_competency": {
14
+ "archive": "data/advanced_competency.tar.gz",
15
+ "description": "Advanced Competency Suite."
16
+ },
17
+ "basic_competency": {
18
+ "archive": "data/basic_competency.tar.gz",
19
+ "description": "Basic Competency Suite."
20
+ },
21
+ "intermediate_competency": {
22
+ "archive": "data/intermediate_competency.tar.gz",
23
+ "description": "Intermediate Competency Suite."
24
+ },
25
+ "asset_scaling": {
26
+ "archive": "data/asset_scaling.tar.gz",
27
+ "description": "Asset Scaling Suite."
28
+ },
29
+ "temporal_reasoning": {
30
+ "archive": "data/temporal_reasoning.tar.gz",
31
+ "description": "Temporal Reasoning Suite."
32
+ },
33
+ "reasoning_decomposition": {
34
+ "archive": "data/reasoning_decomposition.tar.gz",
35
+ "description": "Reasoning Decomposition Suite."
36
+ },
37
+ "baseline_robustness": {
38
+ "archive": "data/baseline_robustness.tar.gz",
39
+ "description": "Baseline Robustness Suite."
40
+ },
41
+ "distractor_robustness": {
42
+ "archive": "data/distractor_robustness.tar.gz",
43
+ "description": "Distractor Robustness Suite."
44
+ },
45
+ "obfuscation_robustness": {
46
+ "archive": "data/obfuscation_robustness.tar.gz",
47
+ "description": "Obfuscation Robustness Suite."
48
+ },
49
+ "sycophancy_robustness": {
50
+ "archive": "data/sycophancy_robustness.tar.gz",
51
+ "description": "Sycophancy Robustness Suite."
52
+ },
53
+ }
54
+
55
def read_json(path: Path):
    """Decode *path* as a single UTF-8 JSON document and return the result."""
    return json.loads(path.read_text(encoding="utf-8"))
58
+
59
def read_jsonl_file(path: Path):
    """Read a UTF-8 JSON-Lines file into a list of decoded records.

    Lines that are empty or whitespace-only are skipped.
    """
    records = []
    with path.open("r", encoding="utf-8") as handle:
        for raw_line in handle:
            if raw_line.strip():
                records.append(json.loads(raw_line))
    return records
62
+
63
class OpenExemptConfig(datasets.BuilderConfig):
    """BuilderConfig that selects one OpenExempt suite, or "all" of them.

    Resolves the suite name into the archive list and description used by
    the builder; the config's `name` is the suite name itself.
    """

    def __init__(self, suite, **kwargs):
        if suite == "all":
            # Aggregate config: pull every suite's archive.
            description = _DESCRIPTION
            archives = [entry["archive"] for entry in _SUITES.values()]
        else:
            entry = _SUITES[suite]
            description = f"OpenExempt: {entry['description']}"
            archives = [entry["archive"]]
        super().__init__(
            name=suite,
            description=description,
            version=_VERSION,
            **kwargs,
        )
        self.suite = suite
        self.archives = archives
80
+
81
class OpenExempt(datasets.GeneratorBasedBuilder):
    """Builder exposing the OpenExempt suites as validation/test splits."""

    BUILDER_CONFIG_CLASS = OpenExemptConfig
    # One config per suite, plus an "all" config that aggregates every suite.
    BUILDER_CONFIGS = [OpenExemptConfig(suite=name) for name in ["all", *_SUITES]]
    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        # Every field is a flat string; `config` and `case` carry
        # JSON-encoded payloads produced in _generate_examples.
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "prompt": datasets.Value("string"),
                    "solution": datasets.Value("string"),
                    "config": datasets.Value("string"),
                    "case": datasets.Value("string"),
                }
            ),
        )

    def _split_generators(self, dl_manager):
        extracted = dl_manager.download_and_extract(self.config.archives)
        # Each archive unpacks to a single suite directory whose
        # subdirectories are the individual datasets.
        dataset_dirs = [
            str(child)
            for root in extracted
            for child in next(Path(root).iterdir()).iterdir()
            if child.is_dir()
        ]
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"dataset_dirs": dataset_dirs, "split": split_tag},
            )
            for split_name, split_tag in (
                (datasets.Split.VALIDATION, "dev"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, dataset_dirs, split):
        for directory in map(Path, dataset_dirs):
            config = read_json(directory / "config.json")
            shared = read_json(directory / "shared.json")
            examples = read_jsonl_file(directory / f"{split}.jsonl")
            # Test cases live in cases.jsonl; other splits use
            # <split>_cases.jsonl (e.g. dev_cases.jsonl).
            case_file_name = "cases" if split == "test" else f"{split}_cases"
            cases = read_jsonl_file(directory / f"{case_file_name}.jsonl")

            if len(examples) != len(cases):
                raise ValueError(f"Number of examples and cases do not match for dataset: {directory}")
            for example, case in zip(examples, cases):
                uid = example["uid"]
                # Assemble the prompt from its sections; empty/missing
                # sections (e.g. optional solved_steps) are dropped.
                sections = (
                    shared["instruction"],
                    shared["meta_instruction"],
                    shared["response_format"],
                    example["facts"],
                    example.get("solved_steps"),  # Solved steps can be omitted
                    shared["statutes"],
                    shared["format_reminder"],
                )
                prompt = "\n\n".join(part for part in sections if part)
                solution = example["solution"]
                if isinstance(solution, dict):
                    # Structured solutions are serialized deterministically.
                    solution = json.dumps(solution, sort_keys=True)

                yield uid, {
                    "id": uid,
                    "prompt": prompt,
                    "solution": solution,
                    "config": json.dumps(config, sort_keys=True),
                    "case": json.dumps(case, sort_keys=True),
                }