mixed-modality-search committed on
Commit
88e3916
·
verified ·
1 Parent(s): 1506f3d

Add files using upload-large-folder tool

Browse files
Files changed (1) hide show
  1. MixBench25.py +59 -132
MixBench25.py CHANGED
@@ -1,113 +1,17 @@
1
- # import os
2
- # import json
3
- # import datasets
4
- # import csv
5
-
6
- # _DESCRIPTION = """\
7
- # MixBench is a benchmark for evaluating mixed-modality retrieval. It contains queries and corpora from four datasets: MSCOCO, Google_WIT, VisualNews, and OVEN. \
8
- # Each subset provides: query, corpus, mixed_corpus, and qrel splits.
9
- # """
10
-
11
-
12
- # _HOMEPAGE = "https://huggingface.co/datasets/mixed-modality-search/MixBench25"
13
-
14
-
15
- # _SUBSETS = ["MSCOCO", "Google_WIT", "VisualNews", "OVEN"]
16
-
17
- # class MixBenchConfig(datasets.BuilderConfig):
18
- # def __init__(self, name, **kwargs):
19
- # if name not in _SUBSETS:
20
- # raise ValueError(f"Unknown subset: {name}. Choose from {_SUBSETS}")
21
- # super().__init__(name=name, version=datasets.Version("1.0.0"), **kwargs)
22
-
23
-
24
- # class MixBench(datasets.GeneratorBasedBuilder):
25
- # BUILDER_CONFIGS = [MixBenchConfig(name=subset) for subset in _SUBSETS]
26
-
27
- # def _info(self):
28
- # features = datasets.Features({
29
- # "query_id": datasets.Value("string"),
30
- # "corpus_id": datasets.Value("string"),
31
- # "text": datasets.Value("string"),
32
- # "image": datasets.Value("string"),
33
- # "score": datasets.Value("int32"),
34
- # })
35
- # return datasets.DatasetInfo(
36
- # description=_DESCRIPTION,
37
- # features=features,
38
- # homepage=_HOMEPAGE,
39
- # )
40
-
41
- # def _split_generators(self, dl_manager):
42
- # # download entire repo root and go to current subset folder
43
- # # data_dir = dl_manager.download_and_extract(".")
44
- # # subset_dir = os.path.join(data_dir, self.config.name)
45
- # # repo_root = dl_manager.manual_dir or os.path.join(os.path.dirname(__file__), self.config.name)
46
- # # subset_dir = os.path.join(repo_root, self.config.name)
47
- # subset_dir = os.path.join(dl_manager.manual_dir or dl_manager._base_path, self.config.name)
48
- # return [
49
- # datasets.SplitGenerator(
50
- # name="query",
51
- # gen_kwargs={"path": os.path.join(subset_dir, "queries.jsonl"), "split": "query"},
52
- # ),
53
- # datasets.SplitGenerator(
54
- # name="corpus",
55
- # gen_kwargs={"path": os.path.join(subset_dir, "corpus.jsonl"), "split": "corpus"},
56
- # ),
57
- # datasets.SplitGenerator(
58
- # name="mixed_corpus",
59
- # gen_kwargs={"path": os.path.join(subset_dir, "mixed_corpus.jsonl"), "split": "mixed_corpus"},
60
- # ),
61
- # datasets.SplitGenerator(
62
- # name="qrel",
63
- # gen_kwargs={"path": os.path.join(subset_dir, "qrels", "qrels.tsv"), "split": "qrel"},
64
- # ),
65
- # ]
66
-
67
- # def _generate_examples(self, path, split):
68
- # if split == "qrel":
69
- # # with open(path, encoding="utf-8") as f:
70
- # # for idx, line in enumerate(f):
71
- # # qid, did, score = line.strip().split()
72
- # # yield idx, {
73
- # # "query_id": qid,
74
- # # "corpus_id": did,
75
- # # "text": "",
76
- # # "image": "",
77
- # # "score": int(score),
78
- # # }
79
- # with open(path, encoding="utf-8") as f:
80
- # reader = csv.DictReader(f, delimiter="\t") # 使用 DictReader 读取有表头的 tsv
81
- # for idx, row in enumerate(reader):
82
- # yield idx, {
83
- # "query_id": row["query_id"],
84
- # "corpus_id": row["corpus_id"],
85
- # "score": int(row["score"]),
86
- # }
87
- # else:
88
- # with open(path, encoding="utf-8") as f:
89
- # for idx, line in enumerate(f):
90
- # row = json.loads(line)
91
- # yield idx, {
92
- # "query_id": row.get("query_id", ""),
93
- # "corpus_id": row.get("corpus_id", ""),
94
- # "text": row.get("text", ""),
95
- # "image": row.get("image", ""),
96
- # "score": 0,
97
- # }
98
-
99
  import os
100
  import json
101
  import datasets
102
- import csv
103
 
104
- _DESCRIPTION = """
105
- MixBench is a benchmark for evaluating mixed-modality retrieval. It contains queries and corpora from four datasets: MSCOCO, Google_WIT, VisualNews, and OVEN.
106
  Each subset provides: query, corpus, mixed_corpus, and qrel splits.
107
  """
108
 
 
109
  _HOMEPAGE = "https://huggingface.co/datasets/mixed-modality-search/MixBench25"
110
 
 
111
  _SUBSETS = ["MSCOCO", "Google_WIT", "VisualNews", "OVEN"]
112
 
113
  class MixBenchConfig(datasets.BuilderConfig):
@@ -116,22 +20,64 @@ class MixBenchConfig(datasets.BuilderConfig):
116
  raise ValueError(f"Unknown subset: {name}. Choose from {_SUBSETS}")
117
  super().__init__(name=name, version=datasets.Version("1.0.0"), **kwargs)
118
 
 
119
  class MixBench(datasets.GeneratorBasedBuilder):
120
  BUILDER_CONFIGS = [MixBenchConfig(name=subset) for subset in _SUBSETS]
121
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
  def _split_generators(self, dl_manager):
 
 
 
 
 
123
  subset_dir = os.path.join(dl_manager.manual_dir or dl_manager._base_path, self.config.name)
124
  return [
125
- datasets.SplitGenerator(name="query", gen_kwargs={"path": os.path.join(subset_dir, "queries.jsonl"), "split": "query"}),
126
- datasets.SplitGenerator(name="corpus", gen_kwargs={"path": os.path.join(subset_dir, "corpus.jsonl"), "split": "corpus"}),
127
- datasets.SplitGenerator(name="mixed_corpus", gen_kwargs={"path": os.path.join(subset_dir, "mixed_corpus.jsonl"), "split": "mixed_corpus"}),
128
- datasets.SplitGenerator(name="qrel", gen_kwargs={"path": os.path.join(subset_dir, "qrels", "qrels.tsv"), "split": "qrel"}),
 
 
 
 
 
 
 
 
 
 
 
 
129
  ]
130
 
131
  def _generate_examples(self, path, split):
132
  if split == "qrel":
 
 
 
 
 
 
 
 
 
 
133
  with open(path, encoding="utf-8") as f:
134
- reader = csv.DictReader(f, delimiter="\t")
135
  for idx, row in enumerate(reader):
136
  yield idx, {
137
  "query_id": row["query_id"],
@@ -142,29 +88,10 @@ class MixBench(datasets.GeneratorBasedBuilder):
142
  with open(path, encoding="utf-8") as f:
143
  for idx, line in enumerate(f):
144
  row = json.loads(line)
145
- if split == "query":
146
- yield idx, {
147
- "query_id": row["query_id"],
148
- "text": row.get("text", ""),
149
- "image": row.get("image", ""),
150
- }
151
- else: # corpus or mixed_corpus
152
- yield idx, {
153
- "corpus_id": row["corpus_id"],
154
- "text": row.get("text", ""),
155
- "image": row.get("image", ""),
156
- }
157
-
158
- def _info(self):
159
- # Provide all possible features but only used in relevant splits
160
- return datasets.DatasetInfo(
161
- description=_DESCRIPTION,
162
- homepage=_HOMEPAGE,
163
- features=datasets.Features({
164
- "query_id": datasets.Value("string"),
165
- "corpus_id": datasets.Value("string"),
166
- "text": datasets.Value("string"),
167
- "image": datasets.Value("string"),
168
- "score": datasets.Value("int32"),
169
- })
170
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import os
2
  import json
3
  import datasets
4
+ import csv
5
 
6
+ _DESCRIPTION = """\
7
+ MixBench is a benchmark for evaluating mixed-modality retrieval. It contains queries and corpora from four datasets: MSCOCO, Google_WIT, VisualNews, and OVEN. \
8
  Each subset provides: query, corpus, mixed_corpus, and qrel splits.
9
  """
10
 
11
+
12
  _HOMEPAGE = "https://huggingface.co/datasets/mixed-modality-search/MixBench25"
13
 
14
+
15
  _SUBSETS = ["MSCOCO", "Google_WIT", "VisualNews", "OVEN"]
16
 
17
  class MixBenchConfig(datasets.BuilderConfig):
 
20
  raise ValueError(f"Unknown subset: {name}. Choose from {_SUBSETS}")
21
  super().__init__(name=name, version=datasets.Version("1.0.0"), **kwargs)
22
 
23
+
24
  class MixBench(datasets.GeneratorBasedBuilder):
25
  BUILDER_CONFIGS = [MixBenchConfig(name=subset) for subset in _SUBSETS]
26
 
27
def _info(self):
    """Return dataset metadata: one flat schema shared by every split."""
    # A single feature set covers query, corpus, mixed_corpus and qrel rows;
    # fields that do not apply to a given split are left empty / zero.
    schema = {
        "query_id": datasets.Value("string"),
        "corpus_id": datasets.Value("string"),
        "text": datasets.Value("string"),
        "image": datasets.Value("string"),
        "score": datasets.Value("int32"),
    }
    return datasets.DatasetInfo(
        description=_DESCRIPTION,
        features=datasets.Features(schema),
        homepage=_HOMEPAGE,
    )
40
+
41
def _split_generators(self, dl_manager):
    """Map each logical split to its on-disk file for the selected subset."""
    # NOTE(review): dl_manager._base_path is a private attribute of the
    # `datasets` library and may break across versions — confirm on upgrade.
    root = dl_manager.manual_dir or dl_manager._base_path
    subset_dir = os.path.join(root, self.config.name)
    split_files = [
        ("query", os.path.join(subset_dir, "queries.jsonl")),
        ("corpus", os.path.join(subset_dir, "corpus.jsonl")),
        ("mixed_corpus", os.path.join(subset_dir, "mixed_corpus.jsonl")),
        ("qrel", os.path.join(subset_dir, "qrels", "qrels.tsv")),
    ]
    return [
        datasets.SplitGenerator(name=split, gen_kwargs={"path": path, "split": split})
        for split, path in split_files
    ]
66
 
67
  def _generate_examples(self, path, split):
68
  if split == "qrel":
69
+ # with open(path, encoding="utf-8") as f:
70
+ # for idx, line in enumerate(f):
71
+ # qid, did, score = line.strip().split()
72
+ # yield idx, {
73
+ # "query_id": qid,
74
+ # "corpus_id": did,
75
+ # "text": "",
76
+ # "image": "",
77
+ # "score": int(score),
78
+ # }
79
  with open(path, encoding="utf-8") as f:
80
+ reader = csv.DictReader(f, delimiter="\t") # 使用 DictReader 读取有表头的 tsv
81
  for idx, row in enumerate(reader):
82
  yield idx, {
83
  "query_id": row["query_id"],
 
88
  with open(path, encoding="utf-8") as f:
89
  for idx, line in enumerate(f):
90
  row = json.loads(line)
91
+ yield idx, {
92
+ "query_id": row.get("query_id", ""),
93
+ "corpus_id": row.get("corpus_id", ""),
94
+ "text": row.get("text", ""),
95
+ "image": row.get("image", ""),
96
+ "score": 0,
97
+ }