mixed-modality-search committed on
Commit
006dab6
·
verified ·
1 Parent(s): 1a962dd

Add files using upload-large-folder tool

Browse files
Files changed (1) hide show
  1. mixbench.py +48 -60
mixbench.py CHANGED
@@ -3,95 +3,83 @@ import json
3
  import csv
4
  import datasets
5
 
6
- _DESCRIPTION = "MixBench: Mixed-Modality Retrieval Benchmark"
 
 
 
 
7
  _HOMEPAGE = "https://huggingface.co/datasets/mixed-modality-search/MixBench25"
8
 
 
 
 
9
class MixBench(datasets.GeneratorBasedBuilder):
    """Loader for one MixBench subset.

    Each BuilderConfig is a subset name; every subset exposes four splits:
    ``query``, ``corpus``, ``mixed_corpus`` (JSONL files) and ``qrel`` (TSV).
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=name,
            version=datasets.Version("1.0.0"),
            description=f"Subset = {name}",
        )
        for name in ["MSCOCO", "Google_WIT", "VisualNews", "OVEN"]
    ]

    def _info(self):
        # NOTE(review): the original defined _info twice; the second definition
        # silently overrode this one and probed a nonexistent
        # ``config.data_files["split"]``.  Only one _info may exist, and
        # `datasets` validates every yielded example against a single feature
        # schema per config — per-split schemas are not possible.  We therefore
        # declare the union of all fields; splits that lack a field yield a
        # neutral default ("" for strings, 0 for the score).
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            homepage=_HOMEPAGE,
            features=datasets.Features({
                "query_id": datasets.Value("string"),
                "corpus_id": datasets.Value("string"),
                "text": datasets.Value("string"),
                "image": datasets.Value("string"),
                "score": datasets.Value("int32"),
            }),
        )

    def _split_generators(self, dl_manager):
        """Locate the subset directory and declare the four splits.

        Prefers a manually supplied data dir; otherwise falls back to
        ``download_and_extract("")`` (presumably resolves to the repo root —
        TODO confirm against the Hub layout).
        """
        name = self.config.name
        base_dir = os.path.join(dl_manager.manual_dir or dl_manager.download_and_extract(""), name)

        return [
            datasets.SplitGenerator(name="query", gen_kwargs={"split": "query", "filepath": os.path.join(base_dir, "queries.jsonl")}),
            datasets.SplitGenerator(name="corpus", gen_kwargs={"split": "corpus", "filepath": os.path.join(base_dir, "corpus.jsonl")}),
            datasets.SplitGenerator(name="mixed_corpus", gen_kwargs={"split": "mixed_corpus", "filepath": os.path.join(base_dir, "mixed_corpus.jsonl")}),
            datasets.SplitGenerator(name="qrel", gen_kwargs={"split": "qrel", "filepath": os.path.join(base_dir, "qrels", "qrels.tsv")}),
        ]

    def _generate_examples(self, filepath, split):
        """Yield (index, example) pairs conforming to the unified schema."""
        if split == "query":
            with open(filepath, encoding="utf-8") as f:
                for idx, line in enumerate(f):
                    item = json.loads(line)
                    yield idx, {
                        "query_id": item["query_id"],
                        "corpus_id": "",  # not applicable to queries
                        "text": item["text"],
                        "image": item.get("image", ""),
                        "score": 0,  # qrel-only field
                    }

        elif split in {"corpus", "mixed_corpus"}:
            with open(filepath, encoding="utf-8") as f:
                for idx, line in enumerate(f):
                    item = json.loads(line)
                    yield idx, {
                        "query_id": "",  # not applicable to corpus entries
                        "corpus_id": item["corpus_id"],
                        "text": item["text"],
                        "image": item.get("image", ""),
                        "score": 0,  # qrel-only field
                    }

        elif split == "qrel":
            with open(filepath, encoding="utf-8") as f:
                reader = csv.DictReader(f, delimiter="\t")
                for idx, row in enumerate(reader):
                    yield idx, {
                        "query_id": row["query_id"],
                        "corpus_id": row["corpus_id"],
                        "text": "",   # qrels carry only ids and a relevance score
                        "image": "",
                        "score": int(row["score"]),
                    }
 
3
  import csv
4
  import datasets
5
 
6
+ _DESCRIPTION = """
7
+ MixBench is a benchmark for mixed-modality retrieval across text, image, and image+text corpora.
8
+ Each config corresponds to a (dataset, split) pair.
9
+ """
10
+
11
  _HOMEPAGE = "https://huggingface.co/datasets/mixed-modality-search/MixBench25"
12
 
13
+ _SUBSETS = ["MSCOCO", "Google_WIT", "VisualNews", "OVEN"]
14
+ _SPLITS = ["query", "corpus", "mixed_corpus", "qrel"]
15
+
16
class MixBench(datasets.GeneratorBasedBuilder):
    """Loader for MixBench with one BuilderConfig per (subset, split) pair.

    Config names look like ``"MSCOCO_query"`` or ``"Google_WIT_mixed_corpus"``.
    Every config exposes a single TRAIN split with a unified feature schema;
    fields that a given file type does not provide are filled with neutral
    defaults ("" for strings, 0 for the score).
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=f"{subset}_{split}",
            version=datasets.Version("1.0.0"),
            description=f"{subset} - {split}",
        )
        for subset in _SUBSETS
        for split in _SPLITS
    ]

    def _info(self):
        # A single unified schema: `datasets` validates every yielded example
        # against these features, so per-config schema variation is avoided.
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            homepage=_HOMEPAGE,
            features=datasets.Features({
                "query_id": datasets.Value("string"),
                "corpus_id": datasets.Value("string"),
                "text": datasets.Value("string"),
                "image": datasets.Value("string"),
                "score": datasets.Value("int32"),
            }),
        )

    @staticmethod
    def _parse_config_name(name):
        """Return (subset, split) parsed from a config name.

        BUG FIX: the previous ``name.split("_", 1)`` broke for any subset whose
        name contains an underscore — e.g. ``"Google_WIT_query"`` parsed as
        subset ``"Google"`` / split ``"WIT_query"`` and raised ValueError.
        ``rsplit`` would be equally wrong because ``"mixed_corpus"`` also
        contains an underscore.  Instead, match the known split suffixes,
        longest first so ``"mixed_corpus"`` wins over ``"corpus"``.
        """
        for split in sorted(_SPLITS, key=len, reverse=True):
            suffix = "_" + split
            if name.endswith(suffix):
                return name[: -len(suffix)], split
        raise ValueError(f"Unknown split: {name}")

    def _split_generators(self, dl_manager):
        """Resolve the data file for this config and declare one TRAIN split.

        Prefers a manually supplied data dir; otherwise falls back to
        ``download_and_extract("")`` (presumably resolves to the repo root —
        TODO confirm against the Hub layout).
        """
        subset, split = self._parse_config_name(self.config.name)
        subset_dir = os.path.join(dl_manager.manual_dir or dl_manager.download_and_extract(""), subset)

        split_files = {
            "query": os.path.join(subset_dir, "queries.jsonl"),
            "corpus": os.path.join(subset_dir, "corpus.jsonl"),
            "mixed_corpus": os.path.join(subset_dir, "mixed_corpus.jsonl"),
            "qrel": os.path.join(subset_dir, "qrels", "qrels.tsv"),
        }
        file_path = split_files[split]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"path": file_path, "split": split},
            )
        ]

    def _generate_examples(self, path, split):
        """Yield (index, example) pairs from a JSONL (query/corpus/mixed_corpus)
        or TSV (qrel) file, conforming to the unified schema."""
        if split in {"query", "corpus", "mixed_corpus"}:
            with open(path, encoding="utf-8") as f:
                for idx, line in enumerate(f):
                    item = json.loads(line)
                    yield idx, {
                        "query_id": item.get("query_id", "") if split == "query" else "",
                        "corpus_id": item.get("corpus_id", "") if split in {"corpus", "mixed_corpus"} else "",
                        "text": item.get("text", ""),
                        "image": item.get("image", ""),
                        "score": 0,  # qrel-only field; neutral default here
                    }

        elif split == "qrel":
            with open(path, encoding="utf-8") as f:
                reader = csv.DictReader(f, delimiter="\t")
                for idx, row in enumerate(reader):
                    yield idx, {
                        "query_id": row["query_id"],
                        "corpus_id": row["corpus_id"],
                        "text": "",   # qrels carry only ids and a relevance score
                        "image": "",
                        "score": int(row["score"]),
                    }