# MixBench dataset loading script (HuggingFace `datasets` builder).
import os
import json
import datasets
import csv
_DESCRIPTION = """\
MixBench is a benchmark for evaluating mixed-modality retrieval. It contains queries and corpora from four datasets: MSCOCO, Google_WIT, VisualNews, and OVEN. \
Each subset provides: query, corpus, mixed_corpus, and qrel splits.
"""
_HOMEPAGE = "https://huggingface.co/datasets/mixed-modality-search/MixBench25"
_SUBSETS = ["MSCOCO", "Google_WIT", "VisualNews", "OVEN"]
class MixBenchConfig(datasets.BuilderConfig):
    """BuilderConfig for a single MixBench subset.

    Validates the subset name against ``_SUBSETS`` before delegating to the
    base ``BuilderConfig``, so a typo fails immediately at config time.
    """

    def __init__(self, name, **kwargs):
        if name in _SUBSETS:
            super().__init__(name=name, version=datasets.Version("1.0.0"), **kwargs)
        else:
            raise ValueError(f"Unknown subset: {name}. Choose from {_SUBSETS}")
class MixBench(datasets.GeneratorBasedBuilder):
    """Loader for the MixBench mixed-modality retrieval benchmark.

    Each configured subset exposes four splits:
      * ``query``        — ``queries.jsonl``
      * ``corpus``       — ``corpus.jsonl``
      * ``mixed_corpus`` — ``mixed_corpus.jsonl``
      * ``qrel``         — ``qrels/qrels.tsv`` (relevance judgments)

    All splits share one flat schema; fields that do not apply to a split are
    filled with empty strings / 0 so every example encodes against the
    declared Features.
    """

    BUILDER_CONFIGS = [MixBenchConfig(name=subset) for subset in _SUBSETS]

    def _info(self):
        # One unified schema for all splits: retrieval splits leave
        # query_id/score unused; the qrel split leaves text/image unused.
        features = datasets.Features({
            "query_id": datasets.Value("string"),
            "corpus_id": datasets.Value("string"),
            "text": datasets.Value("string"),
            "image": datasets.Value("string"),
            "score": datasets.Value("int32"),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
        )

    def _split_generators(self, dl_manager):
        # Resolve the subset folder that sits next to this script.
        # NOTE(review): ``dl_manager._base_path`` is a private attribute of
        # the `datasets` library and may change between versions — confirm
        # before upgrading the dependency.
        subset_dir = os.path.join(dl_manager.manual_dir or dl_manager._base_path, self.config.name)
        # Table-driven split definitions instead of four near-identical
        # SplitGenerator literals.
        split_files = {
            "query": os.path.join(subset_dir, "queries.jsonl"),
            "corpus": os.path.join(subset_dir, "corpus.jsonl"),
            "mixed_corpus": os.path.join(subset_dir, "mixed_corpus.jsonl"),
            "qrel": os.path.join(subset_dir, "qrels", "qrels.tsv"),
        }
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"path": path, "split": split},
            )
            for split, path in split_files.items()
        ]

    def _generate_examples(self, path, split):
        """Yield ``(index, example)`` pairs for one split file.

        ``qrel`` split: headered TSV with query_id / corpus_id / score columns.
        Other splits: JSON Lines with optional per-record fields.
        """
        if split == "qrel":
            with open(path, encoding="utf-8") as f:
                # The qrels file ships with a header row, so DictReader maps
                # columns by name rather than by position.
                reader = csv.DictReader(f, delimiter="\t")
                for idx, row in enumerate(reader):
                    yield idx, {
                        "query_id": row["query_id"],
                        "corpus_id": row["corpus_id"],
                        # Bug fix: these two keys were previously omitted for
                        # qrel rows, so examples did not match the Features
                        # schema declared in _info().
                        "text": "",
                        "image": "",
                        "score": int(row["score"]),
                    }
        else:
            with open(path, encoding="utf-8") as f:
                for idx, line in enumerate(f):
                    record = json.loads(line)
                    yield idx, {
                        "query_id": record.get("query_id", ""),
                        "corpus_id": record.get("corpus_id", ""),
                        "text": record.get("text", ""),
                        "image": record.get("image", ""),
                        # qrel scores do not apply to retrieval records.
                        "score": 0,
                    }