| import json |
| import csv |
| import os |
| import datasets |
|
|
| logger = datasets.logging.get_logger(__name__) |
|
|
|
|
| _DESCRIPTION = "BEIR Benchmark" |
| _DATASETS = ["fiqa", "trec-covid"] |
|
|
| URL = "" |
| _URLs = { |
| dataset: { |
| "corpus": URL + f"{dataset}/corpus.jsonl", |
| } for dataset in _DATASETS} |
|
|
|
|
class BEIR(datasets.GeneratorBasedBuilder):
    """BEIR Benchmark dataset loader (corpus split only).

    One :class:`datasets.BuilderConfig` is created per entry in
    ``_DATASETS``. The corpus is a JSON-Lines file where each line is a
    document object; only the ``_id``, ``title`` and ``text`` fields are
    declared in the feature schema.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=dataset,
            description=f"This is the {dataset} dataset in BEIR Benchmark.",
        )
        for dataset in _DATASETS
    ]

    def _info(self):
        """Return dataset metadata with the document feature schema."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "_id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "text": datasets.Value("string"),
            }),
            # Retrieval corpus: there is no (input, label) pair to declare.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Download the corpus file and return the single 'corpus' split.

        Args:
            dl_manager: datasets download manager used to fetch the URLs
                registered for this config in ``_URLs``.
        """
        my_urls = _URLs[self.config.name]
        data_dir = dl_manager.download_and_extract(my_urls)
        return [
            datasets.SplitGenerator(
                name="corpus",
                gen_kwargs={"corpus_path": data_dir["corpus"]},
            ),
        ]

    def _generate_examples(self, corpus_path):
        """Yield (index, document) examples from the corpus JSONL file.

        Streams the file line by line instead of readlines() so large
        BEIR corpora are not loaded into memory at once. Blank lines
        (e.g. a trailing newline) are skipped rather than crashing
        ``json.loads`` on an empty string.
        """
        with open(corpus_path, encoding="utf-8") as f:
            idx = 0
            for line in f:
                line = line.strip()
                if not line:
                    continue  # tolerate blank/trailing lines
                yield idx, json.loads(line)
                idx += 1