# Datasets:
#
# mFollowIR / mFollowIR.py
# orionweller's picture
# Update mFollowIR.py
# 5dcdb22 verified
import datasets
import json
# BibTeX entry for the mFollowIR paper; the journal field is still a TODO upstream.
_CITATION = '''@article{weller2024mfollowir,
title={{mFollowIR: a Multilingual Benchmark for Instruction Following in Information Retrieval}},
author={Weller, Orion and Chang, Benjamin and Yang, Eugene and Yarmohammadi, Mahsa and Barham, Sam and MacAvaney, Sean and Cohan, Arman and Soldaini, Luca and Van Durme, Benjamin and Lawrie, Dawn},
journal={arXiv preprint TODO},
year={2024}
}'''
# Short human-readable description surfaced via DatasetInfo.
_DESCRIPTION = 'Dataset load script for mFollowIR combining multiple languages'
# Language codes covered by the benchmark (fas/rus/zho — presumably ISO 639-3
# Persian, Russian, Chinese; confirm against the upstream repos).
_LANGUAGES = ["fas", "rus", "zho"]
class mFollowIRCrossLingual(datasets.GeneratorBasedBuilder):
    """Loading script for the cross-lingual mFollowIR benchmark.

    Exposes one builder config per (subset, language) pair, where the subset
    is one of qrels_og / qrels_changed / corpus / queries / top_ranked and
    the language is one of the codes in ``_LANGUAGES``.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=f"{config_name}-{lang}",
            version=datasets.Version("1.0.0"),
            description=f"{config_name} configuration for {lang} language in mFollowIR dataset.",
        )
        for config_name in ["qrels_og", "qrels_changed", "corpus", "queries", "top_ranked"]
        for lang in _LANGUAGES
    ]

    def _info(self):
        """Return a DatasetInfo whose features depend on the selected subset.

        Raises:
            ValueError: if the config name matches no known subset (the
                original fell through and crashed with an opaque NameError
                on ``features``).
        """
        name = self.config.name
        if name.startswith("qrels"):
            # TREC-style relevance judgments: (query, document, score) triples.
            features = datasets.Features({
                "query-id": datasets.Value("string"),
                "corpus-id": datasets.Value("string"),
                "score": datasets.Value("float64"),
            })
        elif name.startswith("corpus"):
            features = datasets.Features({
                "_id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "text": datasets.Value("string"),
            })
        elif name.startswith("queries"):
            features = datasets.Features({
                "_id": datasets.Value("string"),
                "text": datasets.Value("string"),
                "instruction_og": datasets.Value("string"),
                "instruction_changed": datasets.Value("string"),
            })
        elif name.startswith("top_ranked"):
            features = datasets.Features({
                "qid": datasets.Value("string"),
                "pid": datasets.Value("string"),
            })
        else:
            raise ValueError(f"Unknown config name: {name!r}")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            # NOTE(review): arXiv id here does not obviously correspond to the
            # citation above (journal is "TODO") — verify upstream.
            homepage="https://arxiv.org/abs/2304.12367",
            license=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the configured subset/language file and return its split(s).

        Raises:
            ValueError: if the subset is unknown (the original silently
                returned None, which breaks the datasets pipeline later with
                an obscure error).
        """
        # Config names look like "<subset>-<lang>"; subsets may contain
        # underscores but never a dash, so a single split("-") is safe.
        name, lang = self.config.name.split("-")
        if name in ["qrels_og", "qrels_changed"]:
            filepath = dl_manager.download_and_extract(
                f"https://huggingface.co/datasets/jhu-clsp/mFollowIR-{lang}-cl/resolve/main/{name}/test.jsonl"
            )
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"filepath": filepath},
                ),
            ]
        elif name in ["corpus", "top_ranked"]:
            filepath = dl_manager.download_and_extract(
                f"https://huggingface.co/datasets/jhu-clsp/mFollowIR-{lang}-cl/resolve/main/{name}.jsonl"
            )
            return [
                datasets.SplitGenerator(
                    name=name,
                    gen_kwargs={"filepath": filepath},
                ),
            ]
        elif name in ["queries"]:
            # Queries live in the shared (non "-cl") repo, keyed by language.
            filepath = dl_manager.download_and_extract(
                f"https://huggingface.co/datasets/jhu-clsp/mFollowIR/resolve/main/{lang}_map_final.jsonl"
            )
            return [
                datasets.SplitGenerator(
                    name=name,
                    gen_kwargs={"filepath": filepath},
                ),
            ]
        raise ValueError(f"Unknown config name: {self.config.name!r}")

    def _generate_examples(self, filepath):
        """Yield (index, example) pairs from the downloaded JSONL file."""
        name = self.config.name.split("-")[0]
        with open(filepath, "r", encoding="utf-8") as f:
            for idx, line in enumerate(f):
                data = json.loads(line)
                if name.startswith("qrels"):
                    yield idx, {
                        "query-id": data["query-id"],
                        "corpus-id": data["corpus-id"],
                        "score": data["score"],
                    }
                elif name == "corpus":
                    yield idx, {
                        "_id": data["_id"],
                        "title": data["title"],
                        "text": data["text"],
                    }
                elif name == "queries":
                    # The raw file uses different key names than the emitted schema.
                    yield idx, {
                        "_id": data["query_id"],
                        "text": data["ht_text"],
                        "instruction_og": data["instruction_og"],
                        "instruction_changed": data["instruction_changed"],
                    }
                elif name == "top_ranked":
                    yield idx, {
                        "qid": data["qid"],
                        "pid": data["pid"],
                    }