# BEAR-HF / bear.py — dataset loading script
# Uploaded by jwiland via huggingface_hub (revision 4555cc0, verified)
# coding=utf-8
# Copyright 2024 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The BEAR Dataset"""
import zipfile
import os
import re
import json
from pathlib import Path
from typing import List
import datasets
from datasets import DatasetBuilder, DownloadManager, DatasetInfo, SplitGenerator, Split, Value, features
_CITATION = """
@misc{wilandBEARUnifiedFramework2024,
title = {{{BEAR}}: {{A Unified Framework}} for {{Evaluating Relational Knowledge}} in {{Causal}} and {{Masked Language Models}}},
shorttitle = {{{BEAR}}},
author = {Wiland, Jacek and Ploner, Max and Akbik, Alan},
year = {2024},
number = {arXiv:2404.04113},
eprint = {2404.04113},
publisher = {arXiv},
url = {http://arxiv.org/abs/2404.04113},
}
"""
_DESCRIPTION = """The $\text{BEAR}$ dataset and its larger version, $\text{BEAR}_{\text{big}}$, are benchmarks for evaluating common factual knowledge contained in language models.
This dataset was created as part of the [paper "BEAR: A Unified Framework for Evaluating Relational Knowledge in Causal and Masked Language Models"](https://arxiv.org/abs/2404.04113).
"""
_HOMEPAGE = "https://lm-pub-quiz.github.io/"
_LICENSE = "The Creative Commons Attribution-Noncommercial 4.0 International License."
def atoi(text):
    """Convert *text* to an ``int`` when it consists solely of digits;
    otherwise return the string unchanged."""
    if text.isdigit():
        return int(text)
    return text
def natural_keys(text):
    """Sort key producing human ("natural") ordering, e.g. "P2" < "P10".

    Splits *text* into alternating non-digit and digit runs and converts
    the digit runs to ints, so numeric segments compare numerically.
    Use as ``alist.sort(key=natural_keys)``.

    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    """
    chunks = re.split(r'(\d+)', text)
    return [int(chunk) if chunk.isdigit() else chunk for chunk in chunks]
class BEAR(datasets.GeneratorBasedBuilder):
    """BEAR Dataset builder.

    Exposes one split per relation file (``PXX.jsonl``) found in the
    directory named after the active config ("BEAR" or "BEAR-big").
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="BEAR-big", version=VERSION, description="Comprehensive superset of BEAR."),
        datasets.BuilderConfig(name="BEAR", version=VERSION, description="BEAR subset with popular entities."),
    ]

    DEFAULT_CONFIG_NAME = "BEAR"

    def _info(self):
        """Return dataset metadata: one record per (subject, object) fact."""
        features = datasets.Features(
            {
                "sub_id": datasets.Value("string"),
                "sub_label": datasets.Value("string"),
                "sub_aliases": datasets.features.Sequence(datasets.Value("string")),
                "obj_id": datasets.Value("string"),
                "obj_label": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: DownloadManager):
        """Returns SplitGenerators, dynamically creating one for each relation.

        Lists the config directory (files are expected to ship alongside the
        script, so ``dl_manager`` is unused) and emits one split per
        ``*.jsonl`` file, named after the upper-cased relation id.
        """
        filenames = os.listdir(self.config.name)
        # Natural sort so P2 precedes P10, keeping split order human-readable.
        filenames.sort(key=natural_keys)
        splits = []
        for filename in filenames:
            if filename.endswith(".jsonl"):
                # "P19.jsonl" -> "P19"; explicit suffix length instead of a magic -6.
                relation_id = filename[: -len(".jsonl")]
                splits.append(
                    SplitGenerator(
                        name=Split(relation_id.upper()),  # split named after the relation ID, e.g. "P19"
                        gen_kwargs={"filepaths": [os.path.join(self.config.name, filename)]},
                    )
                )
        return splits

    def _generate_examples(self, filepaths: List[str]):
        """Yields (key, example) pairs from the BEAR dataset.

        Keys must be unique within a split; the previous implementation used
        ``sub_id`` alone, which raises ``DuplicatedKeysError`` if a subject
        appears more than once (or across multiple filepaths). A
        filepath-qualified running index is guaranteed unique.
        """
        for filepath in filepaths:
            with open(filepath, encoding="utf-8") as f:
                for index, row in enumerate(f):
                    data = json.loads(row)
                    yield f"{filepath}_{index}", {
                        "sub_id": data["sub_id"],
                        "sub_label": data["sub_label"],
                        "sub_aliases": data["sub_aliases"],
                        "obj_id": data["obj_id"],
                        "obj_label": data["obj_label"],
                    }