Sean MacAvaney committed on
Commit
01d9f2e
·
1 Parent(s): fbaef18

commit files to HF hub

Browse files
Files changed (2) hide show
  1. README.md +61 -0
  2. beir_nq.py +43 -0
README.md ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ pretty_name: '`beir/nq`'
3
+ viewer: false
4
+ source_datasets: []
5
+ task_categories:
6
+ - text-retrieval
7
+ ---
8
+
9
+ # Dataset Card for `beir/nq`
10
+
11
+ The `beir/nq` dataset, provided by the [ir-datasets](https://ir-datasets.com/) package.
12
+ For more information about the dataset, see the [documentation](https://ir-datasets.com/beir#beir/nq).
13
+
14
+ # Data
15
+
16
+ This dataset provides:
17
+ - `docs` (documents, i.e., the corpus); count=2,681,468
18
+ - `queries` (i.e., topics); count=3,452
19
+ - `qrels`: (relevance assessments); count=4,201
20
+
21
+
22
+ ## Usage
23
+
24
+ ```python
25
+ from datasets import load_dataset
26
+
27
+ docs = load_dataset('irds/beir_nq', 'docs')
28
+ for record in docs:
29
+ record # {'doc_id': ..., 'text': ..., 'title': ...}
30
+
31
+ queries = load_dataset('irds/beir_nq', 'queries')
32
+ for record in queries:
33
+ record # {'query_id': ..., 'text': ...}
34
+
35
+ qrels = load_dataset('irds/beir_nq', 'qrels')
36
+ for record in qrels:
37
+ record # {'query_id': ..., 'doc_id': ..., 'relevance': ..., 'iteration': ...}
38
+
39
+ ```
40
+
41
+ Note that calling `load_dataset` will download the dataset (or provide access instructions when it's not public) and make a copy of the
42
+ data in 🤗 Dataset format.
43
+
44
+ ## Citation Information
45
+
46
+ ```
47
+ @article{Kwiatkowski2019Nq,
48
+ title = {Natural Questions: a Benchmark for Question Answering Research},
49
+ author = {Tom Kwiatkowski and Jennimaria Palomaki and Olivia Redfield and Michael Collins and Ankur Parikh and Chris Alberti and Danielle Epstein and Illia Polosukhin and Matthew Kelcey and Jacob Devlin and Kenton Lee and Kristina N. Toutanova and Llion Jones and Ming-Wei Chang and Andrew Dai and Jakob Uszkoreit and Quoc Le and Slav Petrov},
50
+ year = {2019},
51
+ journal = {TACL}
52
+ }
53
+ @article{Thakur2021Beir,
54
+ title = "BEIR: A Heterogenous Benchmark for Zero-shot Evaluation of Information Retrieval Models",
55
+ author = "Thakur, Nandan and Reimers, Nils and Rücklé, Andreas and Srivastava, Abhishek and Gurevych, Iryna",
56
+ journal= "arXiv preprint arXiv:2104.08663",
57
+ month = "4",
58
+ year = "2021",
59
+ url = "https://arxiv.org/abs/2104.08663",
60
+ }
61
+ ```
beir_nq.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Hugging Face `datasets` loading script for the `beir/nq` dataset, backed by ir-datasets."""
try:
    import ir_datasets
except ImportError as e:
    # Chain the original exception so the real import failure stays visible to the user.
    raise ImportError('ir-datasets package missing; `pip install ir-datasets`') from e
import datasets

# ir-datasets identifier for this dataset (see https://ir-datasets.com/beir#beir/nq).
IRDS_ID = 'beir/nq'
# Entity streams exposed by this dataset, mapped to the feature name -> dtype schema of each.
IRDS_ENTITY_TYPES = {'docs': {'doc_id': 'string', 'text': 'string', 'title': 'string'}, 'queries': {'query_id': 'string', 'text': 'string'}, 'qrels': {'query_id': 'string', 'doc_id': 'string', 'relevance': 'int64', 'iteration': 'string'}}

# BibTeX for the NQ and BEIR papers; kept verbatim (it is surfaced in DatasetInfo.citation).
_CITATION = '@article{Kwiatkowski2019Nq,\n  title = {Natural Questions: a Benchmark for Question Answering Research},\n  author = {Tom Kwiatkowski and Jennimaria Palomaki and Olivia Redfield and Michael Collins and Ankur Parikh and Chris Alberti and Danielle Epstein and Illia Polosukhin and Matthew Kelcey and Jacob Devlin and Kenton Lee and Kristina N. Toutanova and Llion Jones and Ming-Wei Chang and Andrew Dai and Jakob Uszkoreit and Quoc Le and Slav Petrov},\n  year = {2019},\n  journal = {TACL}\n}\n@article{Thakur2021Beir,\n  title = "BEIR: A Heterogenous Benchmark for Zero-shot Evaluation of Information Retrieval Models",\n  author = "Thakur, Nandan and Reimers, Nils and Rücklé, Andreas and Srivastava, Abhishek and Gurevych, Iryna", \n  journal= "arXiv preprint arXiv:2104.08663",\n  month = "4",\n  year = "2021",\n  url = "https://arxiv.org/abs/2104.08663",\n}'

_DESCRIPTION = ""  # TODO
class beir_nq(datasets.GeneratorBasedBuilder):
    """`datasets` builder exposing the `beir/nq` docs/queries/qrels streams via ir-datasets.

    One builder config exists per entity stream; each config yields a single
    split of the same name, with features taken from `IRDS_ENTITY_TYPES`.
    """

    BUILDER_CONFIGS = [datasets.BuilderConfig(name=e) for e in IRDS_ENTITY_TYPES]

    def _info(self):
        """Return the DatasetInfo (features/citation) for the selected entity stream."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({k: datasets.Value(v) for k, v in IRDS_ENTITY_TYPES[self.config.name].items()}),
            # Plain string: the original used an f-string with no placeholders.
            homepage="https://ir-datasets.com/beir#beir/nq",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Expose exactly one split, named after the active config (docs/queries/qrels)."""
        return [datasets.SplitGenerator(name=self.config.name)]

    def _generate_examples(self):
        """Yield (key, record-dict) pairs from the corresponding ir-datasets stream."""
        dataset = ir_datasets.load(IRDS_ID)
        for i, item in enumerate(getattr(dataset, self.config.name)):
            # Prefer a natural key where the record has one; qrels (which have no
            # single-field unique id) fall back to the enumeration index.
            key = i
            if self.config.name == 'docs':
                key = item.doc_id
            elif self.config.name == 'queries':
                key = item.query_id
            yield key, item._asdict()

    def as_dataset(self, split=None, *args, **kwargs):
        """Always return the split matching this config, to avoid a redundant DatasetDict layer."""
        split = self.config.name
        return super().as_dataset(split, *args, **kwargs)