mixed-modality-search committed on
Commit
1a962dd
·
verified ·
1 Parent(s): b6a826f

Add files using upload-large-folder tool

Browse files
Files changed (1) hide show
  1. mixbench.py +47 -39
mixbench.py CHANGED
@@ -3,87 +3,95 @@ import json
3
  import csv
4
  import datasets
5
 
6
- _DESCRIPTION = """
7
- MixBench is a benchmark for mixed-modality retrieval across text, image, and image+text corpora.
8
- """
9
-
10
  _HOMEPAGE = "https://huggingface.co/datasets/mixed-modality-search/MixBench25"
11
 
12
  class MixBench(datasets.GeneratorBasedBuilder):
13
  BUILDER_CONFIGS = [
14
- datasets.BuilderConfig(name=name, version=datasets.Version("1.0.0"), description=f"{name} subset of MixBench")
15
  for name in ["MSCOCO", "Google_WIT", "VisualNews", "OVEN"]
16
  ]
17
 
18
  def _info(self):
19
- # ⚠️ unified superset schema (all splits must use this)
20
  return datasets.DatasetInfo(
21
  description=_DESCRIPTION,
22
- homepage=_HOMEPAGE,
23
  features=datasets.Features({
24
- "query_id": datasets.Value("string"),
25
- "corpus_id": datasets.Value("string"),
26
  "text": datasets.Value("string"),
27
- "image": datasets.Value("string"),
28
- "score": datasets.Value("int32"),
29
  }),
 
30
  )
31
 
32
  def _split_generators(self, dl_manager):
33
- subset_dir = os.path.join(dl_manager.manual_dir or dl_manager._base_path, self.config.name)
 
34
 
35
  return [
36
- datasets.SplitGenerator(
37
- name="query",
38
- gen_kwargs={"path": os.path.join(subset_dir, "queries.jsonl"), "split": "query"},
39
- ),
40
- datasets.SplitGenerator(
41
- name="corpus",
42
- gen_kwargs={"path": os.path.join(subset_dir, "corpus.jsonl"), "split": "corpus"},
43
- ),
44
- datasets.SplitGenerator(
45
- name="mixed_corpus",
46
- gen_kwargs={"path": os.path.join(subset_dir, "mixed_corpus.jsonl"), "split": "mixed_corpus"},
47
- ),
48
- datasets.SplitGenerator(
49
- name="qrel",
50
- gen_kwargs={"path": os.path.join(subset_dir, "qrels", "qrels.tsv"), "split": "qrel"},
51
- ),
52
  ]
53
 
54
- def _generate_examples(self, path, split):
55
  if split == "query":
56
- with open(path, encoding="utf-8") as f:
57
  for idx, line in enumerate(f):
58
  item = json.loads(line)
59
  yield idx, {
60
  "query_id": item["query_id"],
61
- "corpus_id": "",
62
  "text": item["text"],
63
  "image": item.get("image", ""),
64
- "score": 0,
65
  }
66
 
67
  elif split in {"corpus", "mixed_corpus"}:
68
- with open(path, encoding="utf-8") as f:
69
  for idx, line in enumerate(f):
70
  item = json.loads(line)
71
  yield idx, {
72
- "query_id": "",
73
  "corpus_id": item["corpus_id"],
74
  "text": item["text"],
75
- "image": item["image"],
76
- "score": 0,
77
  }
78
 
79
  elif split == "qrel":
80
- with open(path, encoding="utf-8") as f:
81
  reader = csv.DictReader(f, delimiter="\t")
82
  for idx, row in enumerate(reader):
83
  yield idx, {
84
  "query_id": row["query_id"],
85
  "corpus_id": row["corpus_id"],
86
- "text": "",
87
- "image": "",
88
  "score": int(row["score"]),
89
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  import csv
4
  import datasets
5
 
6
+ _DESCRIPTION = "MixBench: Mixed-Modality Retrieval Benchmark"
 
 
 
7
  _HOMEPAGE = "https://huggingface.co/datasets/mixed-modality-search/MixBench25"
8
 
9
class MixBench(datasets.GeneratorBasedBuilder):
    """Loading script for MixBench, a mixed-modality retrieval benchmark.

    Each subset (MSCOCO, Google_WIT, VisualNews, OVEN) exposes four splits:
    ``query``, ``corpus``, ``mixed_corpus`` and ``qrel``.  The ``datasets``
    library declares one ``Features`` schema per dataset (NOT per split), so
    all splits share a single superset schema and fill the fields they do not
    use with neutral defaults ("" for strings, 0 for the score).

    Fixes versus the committed revision:
      * removed the duplicate ``_info`` definition — the second one silently
        overrode the first and keyed features off ``self.config.data_files``,
        which per-split feature selection cannot work this way;
      * restored the unified superset schema so every split's yielded dicts
        match the declared features;
      * dropped the ``dl_manager.download_and_extract("")`` fallback (an
        empty URL cannot be downloaded).
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=name,
            version=datasets.Version("1.0.0"),
            description=f"Subset = {name}",
        )
        for name in ["MSCOCO", "Google_WIT", "VisualNews", "OVEN"]
    ]

    def _info(self):
        # Unified superset schema: every split yields all five keys.
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "query_id": datasets.Value("string"),
                "corpus_id": datasets.Value("string"),
                "text": datasets.Value("string"),
                "image": datasets.Value("string"),
                "score": datasets.Value("int32"),
            }),
            homepage=_HOMEPAGE,
        )

    def _split_generators(self, dl_manager):
        """Locate the subset's data files and declare the four splits.

        Files are expected under ``<base>/<subset_name>/`` where ``<base>`` is
        ``manual_dir`` when supplied, else the builder's base path.
        NOTE(review): ``_base_path`` is a private ``DownloadManager``
        attribute — confirm against the installed ``datasets`` version.
        """
        base_dir = os.path.join(
            dl_manager.manual_dir or dl_manager._base_path, self.config.name
        )

        def _split(split, *parts):
            # One SplitGenerator per logical split; filepath is built from
            # the subset directory plus the split-specific relative parts.
            return datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "split": split,
                    "filepath": os.path.join(base_dir, *parts),
                },
            )

        return [
            _split("query", "queries.jsonl"),
            _split("corpus", "corpus.jsonl"),
            _split("mixed_corpus", "mixed_corpus.jsonl"),
            _split("qrel", "qrels", "qrels.tsv"),
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(idx, example)`` pairs matching the superset schema.

        Args:
            filepath: Path to the split's data file (JSONL for query/corpus
                splits, TSV for qrels).
            split: One of ``query``, ``corpus``, ``mixed_corpus``, ``qrel``.

        Unused fields are filled with "" / 0 so every example satisfies the
        single declared ``Features`` schema.
        """
        if split == "query":
            with open(filepath, encoding="utf-8") as f:
                for idx, line in enumerate(f):
                    item = json.loads(line)
                    yield idx, {
                        "query_id": item["query_id"],
                        "corpus_id": "",
                        "text": item["text"],
                        # image is optional for text-only queries
                        "image": item.get("image", ""),
                        "score": 0,
                    }

        elif split in {"corpus", "mixed_corpus"}:
            with open(filepath, encoding="utf-8") as f:
                for idx, line in enumerate(f):
                    item = json.loads(line)
                    yield idx, {
                        "query_id": "",
                        "corpus_id": item["corpus_id"],
                        "text": item["text"],
                        # .get: mixed corpora contain text-only documents
                        "image": item.get("image", ""),
                        "score": 0,
                    }

        elif split == "qrel":
            with open(filepath, encoding="utf-8") as f:
                reader = csv.DictReader(f, delimiter="\t")
                for idx, row in enumerate(reader):
                    yield idx, {
                        "query_id": row["query_id"],
                        "corpus_id": row["corpus_id"],
                        "text": "",
                        "image": "",
                        "score": int(row["score"]),
                    }