| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| """The BEAR Dataset""" |
|
|
| import zipfile |
| import os |
| import re |
| import json |
| from pathlib import Path |
| from typing import List |
| import datasets |
| from datasets import DatasetBuilder, DownloadManager, DatasetInfo, SplitGenerator, Split, Value, features |
|
|
|
|
|
|
# BibTeX entry for the paper that introduced the BEAR benchmark (Wiland et al., 2024).
_CITATION = """
@misc{wilandBEARUnifiedFramework2024,
title = {{{BEAR}}: {{A Unified Framework}} for {{Evaluating Relational Knowledge}} in {{Causal}} and {{Masked Language Models}}},
shorttitle = {{{BEAR}}},
author = {Wiland, Jacek and Ploner, Max and Akbik, Alan},
year = {2024},
number = {arXiv:2404.04113},
eprint = {2404.04113},
publisher = {arXiv},
url = {http://arxiv.org/abs/2404.04113},
}
"""


# Dataset-card description (Markdown/LaTeX) shown on the hub page.
_DESCRIPTION = """The $\text{BEAR}$ dataset and its larger version, $\text{BEAR}_{\text{big}}$, are benchmarks for evaluating common factual knowledge contained in language models.

This dataset was created as part of the [paper "BEAR: A Unified Framework for Evaluating Relational Knowledge in Causal and Masked Language Models"](https://arxiv.org/abs/2404.04113).
"""


# Project homepage of the lm-pub-quiz tooling built around BEAR.
_HOMEPAGE = "https://lm-pub-quiz.github.io/"


# License under which the dataset is distributed.
_LICENSE = "The Creative Commons Attribution-Noncommercial 4.0 International License."
|
|
|
|
def atoi(text):
    """Return *text* as an ``int`` when it consists solely of digits, else unchanged."""
    if text.isdigit():
        return int(text)
    return text
|
|
def natural_keys(text):
    """Sort key for human ("natural") ordering, e.g. ``P2`` before ``P10``.

    Splits *text* on digit runs and converts each run to an ``int`` so that
    ``alist.sort(key=natural_keys)`` compares numeric chunks numerically
    (http://nedbatchelder.com/blog/200712/human_sorting.html).
    """
    return [int(chunk) if chunk.isdigit() else chunk for chunk in re.split(r'(\d+)', text)]
|
|
|
|
class BEAR(datasets.GeneratorBasedBuilder):
    """BEAR Dataset: relational-knowledge probe instances, one split per relation.

    Each relation lives in its own ``<relation_id>.jsonl`` file inside a
    directory named after the builder config (``BEAR`` or ``BEAR-big``);
    every line of such a file is one subject/object fact instance.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="BEAR-big", version=VERSION, description="Comprehensive superset of BEAR."),
        datasets.BuilderConfig(name="BEAR", version=VERSION, description="BEAR subset with popular entities."),
    ]

    DEFAULT_CONFIG_NAME = "BEAR"

    def _info(self):
        """Return the dataset metadata and the feature schema of one instance."""
        features = datasets.Features(
            {
                "sub_id": datasets.Value("string"),
                "sub_label": datasets.Value("string"),
                "sub_aliases": datasets.features.Sequence(datasets.Value("string")),
                "obj_id": datasets.Value("string"),
                "obj_label": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: DownloadManager):
        """Returns SplitGenerators, dynamically creating one for each relation.

        Reads the ``*.jsonl`` files from the local directory named after the
        active config (``self.config.name``); *dl_manager* is unused because
        nothing is downloaded. Splits are ordered naturally (P2 before P10).
        """
        filenames = os.listdir(self.config.name)
        filenames.sort(key=natural_keys)
        splits = []
        for filename in filenames:
            if filename.endswith(".jsonl"):
                relation_id = filename[:-6]  # strip the ".jsonl" suffix
                splits.append(SplitGenerator(
                    name=Split(relation_id.upper()),
                    gen_kwargs={"filepaths": [os.path.join(self.config.name, filename)]},
                ))
        return splits

    def _generate_examples(self, filepaths: List[str]):
        """Yields (key, example) pairs from the given JSON-Lines files."""
        # Use a running counter as the example key: subject ids may repeat
        # across files (or within one file), and duplicate keys make the
        # datasets library raise DuplicatedKeysError during generation.
        key = 0
        for filepath in filepaths:
            with open(filepath, encoding="utf-8") as f:
                for row in f:
                    data = json.loads(row)
                    yield key, {
                        "sub_id": data["sub_id"],
                        "sub_label": data["sub_label"],
                        "sub_aliases": data["sub_aliases"],
                        "obj_id": data["obj_id"],
                        "obj_label": data["obj_label"],
                    }
                    key += 1