import json
from pathlib import Path

import datasets

_DESCRIPTION = "OpenExempt is a diagnostic benchmark for legal reasoning in language models."

_HOMEPAGE = "https://github.com/servantez/OpenExempt"

_LICENSE = "CC BY 4.0"

_VERSION = datasets.Version("1.0.0")

_CITATION = """
@misc{servantez2026openexemptdiagnosticbenchmarklegal,
      title={OpenExempt: A Diagnostic Benchmark for Legal Reasoning and a Framework for Creating Custom Benchmarks on Demand},
      author={Sergio Servantez and Sarah B. Lawsky and Rajiv Jain and Daniel W. Linna Jr. and Kristian Hammond},
      year={2026},
      eprint={2601.13183},
      archivePrefix={arXiv},
      primaryClass={cs.CL},
      url={https://arxiv.org/abs/2601.13183},
}
"""

# Mapping from suite name to the archive that holds its datasets. Each suite
# is exposed as its own builder config, plus an "all" config spanning them.
_SUITES = {
    "advanced_competency": {
        "archive": "data/advanced_competency.tar.gz",
        "description": "Advanced Competency Suite.",
    },
    "basic_competency": {
        "archive": "data/basic_competency.tar.gz",
        "description": "Basic Competency Suite.",
    },
    "intermediate_competency": {
        "archive": "data/intermediate_competency.tar.gz",
        "description": "Intermediate Competency Suite.",
    },
    "asset_scaling": {
        "archive": "data/asset_scaling.tar.gz",
        "description": "Asset Scaling Suite.",
    },
    "temporal_reasoning": {
        "archive": "data/temporal_reasoning.tar.gz",
        "description": "Temporal Reasoning Suite.",
    },
    "reasoning_decomposition": {
        "archive": "data/reasoning_decomposition.tar.gz",
        "description": "Reasoning Decomposition Suite.",
    },
    "baseline_robustness": {
        "archive": "data/baseline_robustness.tar.gz",
        "description": "Baseline Robustness Suite.",
    },
    "distractor_robustness": {
        "archive": "data/distractor_robustness.tar.gz",
        "description": "Distractor Robustness Suite.",
    },
    "obfuscation_robustness": {
        "archive": "data/obfuscation_robustness.tar.gz",
        "description": "Obfuscation Robustness Suite.",
    },
    "sycophancy_robustness": {
        "archive": "data/sycophancy_robustness.tar.gz",
        "description": "Sycophancy Robustness Suite.",
    },
}


def read_json(path: Path):
    """Load a single JSON object from ``path``."""
    with path.open("r", encoding="utf-8") as file:
        return json.load(file)


def read_jsonl_file(path: Path):
    """Load a JSON Lines file as a list of records, skipping blank lines."""
    with path.open("r", encoding="utf-8") as file:
        return [json.loads(line) for line in file if line.strip()]


class OpenExemptConfig(datasets.BuilderConfig):
    """BuilderConfig for one OpenExempt suite, or "all" for every suite."""

    def __init__(self, suite, **kwargs):
        if suite == "all":
            description = _DESCRIPTION
            archives = [info["archive"] for info in _SUITES.values()]
        else:
            info = _SUITES[suite]
            description = f"OpenExempt: {info['description']}"
            archives = [info["archive"]]
        super().__init__(name=suite, description=description, version=_VERSION, **kwargs)
        self.suite = suite
        self.archives = archives


class OpenExempt(datasets.GeneratorBasedBuilder):
    """OpenExempt: a diagnostic benchmark for legal reasoning."""

    BUILDER_CONFIG_CLASS = OpenExemptConfig
    BUILDER_CONFIGS = [
        OpenExemptConfig(suite=suite) for suite in ["all"] + list(_SUITES.keys())
    ]
    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "prompt": datasets.Value("string"),
                    "solution": datasets.Value("string"),
                    "config": datasets.Value("string"),
                    "case": datasets.Value("string"),
                }
            ),
        )

    def _split_generators(self, dl_manager):
        extracted_paths = dl_manager.download_and_extract(self.config.archives)
        # Each archive extracts to a single suite directory that contains one
        # subdirectory per dataset; collect all dataset directories.
        dataset_dirs = []
        for extracted_path in extracted_paths:
            suite_dir = next(Path(extracted_path).iterdir())
            for dataset_dir in suite_dir.iterdir():
                if dataset_dir.is_dir():
                    dataset_dirs.append(str(dataset_dir))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "dataset_dirs": dataset_dirs,
                    "split": "dev",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "dataset_dirs": dataset_dirs,
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, dataset_dirs, split):
        for dataset_dir in dataset_dirs:
            dataset_dir = Path(dataset_dir)
            config = read_json(dataset_dir / "config.json")
            shared = read_json(dataset_dir / "shared.json")
            examples = read_jsonl_file(dataset_dir / f"{split}.jsonl")
            # The test split keeps its cases in "cases.jsonl"; other splits
            # use "<split>_cases.jsonl" (e.g. "dev_cases.jsonl").
            case_file_name = "cases" if split == "test" else f"{split}_cases"
            cases = read_jsonl_file(dataset_dir / f"{case_file_name}.jsonl")
            if len(examples) != len(cases):
                raise ValueError(f"Number of examples and cases do not match for dataset: {dataset_dir}")
            for example, case in zip(examples, cases):
                uid = example["uid"]
                prompt_inputs = [
                    shared["instruction"],
                    shared["meta_instruction"],
                    shared["response_format"],
                    example["facts"],
                    example.get("solved_steps"),  # Solved steps can be omitted.
                    shared["statutes"],
                    shared["format_reminder"],
                ]
                # Drop any missing or empty sections before joining the prompt.
                prompt = "\n\n".join(filter(None, prompt_inputs))
                solution = example["solution"]
                if isinstance(solution, dict):
                    solution = json.dumps(solution, sort_keys=True)
                yield uid, {
                    "id": uid,
                    "prompt": prompt,
                    "solution": solution,
                    "config": json.dumps(config, sort_keys=True),
                    "case": json.dumps(case, sort_keys=True),
                }
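

# Usage sketch (illustrative, not part of the loader itself). This assumes a
# version of the `datasets` library that still supports script-based datasets
# (< 4.0) and that the "data/" archives referenced in _SUITES sit alongside
# this script; the config name can be any key of _SUITES, or "all".
if __name__ == "__main__":
    ds = datasets.load_dataset(
        __file__,                # use this file as a local loading script
        "basic_competency",      # one suite; DEFAULT_CONFIG_NAME is "all"
        trust_remote_code=True,  # required by recent `datasets` for scripts
    )
    print(ds)                    # DatasetDict with "validation" and "test"
    print(ds["test"][0]["id"])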