import csv

import datasets


_CITATION = """\
@misc{abdallah2025good,
    title={How Good are LLM-based Rerankers? An Empirical Analysis of State-of-the-Art Reranking Models},
    author={Abdelrahman Abdallah and Bhawna Piryani and Jamshid Mozafari and Mohammed Ali and Adam Jatowt},
    year={2025},
    eprint={2508.16757},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
FutureQueryEval is a novel IR benchmark comprising 148 queries with 2,938 query-document pairs
across 7 topical categories, designed to evaluate reranker performance on temporal novelty.
All queries refer to events after April 2025 to ensure zero contamination with LLM pretraining data.
"""

_HOMEPAGE = "https://github.com/DataScienceUIBK/llm-reranking-generalization-study"

_LICENSE = "Apache-2.0"

# Relative paths resolve against the dataset repository that hosts this script.
_URLS = {
    "queries": "queries.csv",
    "corpus": "corpus.tsv",
    "qrels": "qrels.txt",
}


class FutureQueryEval(datasets.GeneratorBasedBuilder):
    """FutureQueryEval dataset for temporal IR evaluation."""

    VERSION = datasets.Version("1.0.0")

    # One config per artifact type; select via the second argument to load_dataset().
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="queries",
            version=VERSION,
            description="Query collection with categories",
        ),
        datasets.BuilderConfig(
            name="corpus",
            version=VERSION,
            description="Document corpus",
        ),
        datasets.BuilderConfig(
            name="qrels",
            version=VERSION,
            description="Relevance judgments",
        ),
    ]

    DEFAULT_CONFIG_NAME = "queries"

    def _info(self):
        if self.config.name == "queries":
            features = datasets.Features({
                "query_id": datasets.Value("string"),
                "query_text": datasets.Value("string"),
                "category": datasets.Value("string"),
            })
        elif self.config.name == "corpus":
            features = datasets.Features({
                "doc_id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "text": datasets.Value("string"),
                "url": datasets.Value("string"),
            })
        elif self.config.name == "qrels":
            features = datasets.Features({
                "query_id": datasets.Value("string"),
                "iteration": datasets.Value("int32"),
                "doc_id": datasets.Value("string"),
                "relevance": datasets.Value("int32"),
            })
        else:
            # Defensive: BUILDER_CONFIGS restricts names, but avoid an unbound
            # `features` if a new config is added without a schema.
            raise ValueError(f"Unknown config: {self.config.name}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Each config exposes a single split named after the config, so download
        # only the file that the selected config needs.
        downloaded = dl_manager.download({self.config.name: _URLS[self.config.name]})
        return [
            datasets.SplitGenerator(
                name=self.config.name,
                gen_kwargs={"filepath": downloaded[self.config.name]},
            ),
        ]

    def _generate_examples(self, filepath):
        if self.config.name == "queries":
            # queries.csv: comma-separated with a header row.
            with open(filepath, encoding="utf-8", newline="") as f:
                reader = csv.DictReader(f, delimiter=",")
                for key, row in enumerate(reader):
                    yield key, {
                        "query_id": row["query_id"],
                        "query_text": row["query_text"],
                        "category": row["category"],
                    }

        elif self.config.name == "corpus":
            # corpus.tsv: tab-separated with a header row.
            with open(filepath, encoding="utf-8", newline="") as f:
                reader = csv.DictReader(f, delimiter="\t")
                for key, row in enumerate(reader):
                    yield key, {
                        "doc_id": row["doc_id"],
                        "title": row["title"],
                        "text": row["text"],
                        "url": row["url"],
                    }

        elif self.config.name == "qrels":
            # qrels.txt: TREC-style whitespace-separated lines of
            # "query_id iteration doc_id relevance".
            with open(filepath, encoding="utf-8") as f:
                for key, line in enumerate(f):
                    parts = line.strip().split()
                    if len(parts) == 4:  # skip blank or malformed lines
                        yield key, {
                            "query_id": parts[0],
                            "iteration": int(parts[1]),
                            "doc_id": parts[2],
                            "relevance": int(parts[3]),
                        }
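
# Usage sketch: a minimal example, assuming a `datasets` version that still
# supports script-based loaders (pre-3.0, with trust_remote_code enabled).
# Passing this file's path to load_dataset is illustrative; in practice you
# would point it at the dataset repository that hosts the script and data files.
if __name__ == "__main__":
    for config in ("queries", "corpus", "qrels"):
        ds = datasets.load_dataset(
            __file__, config, split=config, trust_remote_code=True
        )
        print(config, ds[0])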