pufanyi commited on
Commit
03c6175
·
1 Parent(s): 895b0e8

add MME.py

Browse files
Files changed (2) hide show
  1. MME.py +154 -0
  2. data/{MME.json → MME_images.json} +0 -0
MME.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
"""
MME is a comprehensive evaluation benchmark for Multimodal Large Language Models (MLLMs). It measures both perception and cognition abilities on a total of 14 subtasks, using manually designed instruction-answer pairs to avoid data leakage from public datasets and concise instructions that allow fair, quantitative comparison of MLLMs.
"""
17
+
18
+
19
+ import json
20
+ import pandas
21
+ import base64
22
+ import datasets
23
+
24
+
25
# Find for instance the citation on arxiv or on the dataset repo/website
# NOTE(review): this BibTeX entry cites the MIMIC-IT paper (arXiv:2306.05425),
# not the MME benchmark itself (arXiv:2306.13394) — it looks copied from a
# sibling dataset script; confirm and replace with the correct MME citation.
_CITATION = """\
@article{li2023mimicit,
title={MIMIC-IT: Multi-Modal In-Context Instruction Tuning},
author={Bo Li and Yuanhan Zhang and Liangyu Chen and Jinghao Wang and Fanyi Pu and Jingkang Yang and Chunyuan Li and Ziwei Liu},
year={2023},
eprint={2306.05425},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
"""

# You can copy an official description
_DESCRIPTION = """\
Multimodal Large Language Model (MLLM) relies on the powerful LLM to perform multimodal tasks, showing amazing emergent abilities in recent studies, such as writing poems based on an image. However, it is difficult for these case studies to fully reflect the performance of MLLM, lacking a comprehensive evaluation. In this paper, we fill in this blank, presenting the first MLLM Evaluation benchmark MME. It measures both perception and cognition abilities on a total of 14 subtasks. In order to avoid data leakage that may arise from direct use of public datasets for evaluation, the annotations of instruction-answer pairs are all manually designed. The concise instruction design allows us to fairly compare MLLMs, instead of struggling in prompt engineering. Besides, with such an instruction, we can also easily carry out quantitative statistics. A total of 12 advanced MLLMs are comprehensively evaluated on our MME, which not only suggests that existing MLLMs still have a large room for improvement, but also reveals the potential directions for the subsequent model optimization.
"""

# Homepage of the MME evaluation benchmark.
_HOMEPAGE = "https://github.com/BradyFU/Awesome-Multimodal-Large-Language-Models/tree/Evaluation"

_LICENSE = "MIT"

# Per-sub-dataset metadata (instruction file stems and human-readable description).
# NOTE(review): not referenced by any code visible in this file —
# get_builder_config hard-codes the single "MME" config; confirm whether this
# table is consumed elsewhere or is dead data.
_SUBDATASETS = {
    "MME": {
        "instructions": ["MME"],
        "description": "MLLM Evaluation",
    },
}

# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
# Relative paths resolve against the dataset repository hosting this script.
_URLS = {
    "MME": {
        "instructions": "data/MME_instructions.json",
        "images": "data/MME_images.json",
    }
}
61
+
62
+
63
def get_builder_config(VERSION):
    """Build the list of BuilderConfig objects exposed by this script.

    Currently a single configuration, ``MME``, at the given *VERSION*.
    """
    mme_config = datasets.BuilderConfig(
        name="MME",
        version=VERSION,
        description="MLLM Evaluation",
    )
    return [mme_config]
72
+
73
+
74
class MME(datasets.GeneratorBasedBuilder):
    """
    Multimodal Large Language Model (MLLM) relies on the powerful LLM to perform multimodal tasks, showing amazing emergent abilities in recent studies, such as writing poems based on an image. However, it is difficult for these case studies to fully reflect the performance of MLLM, lacking a comprehensive evaluation. In this paper, we fill in this blank, presenting the first MLLM Evaluation benchmark MME. It measures both perception and cognition abilities on a total of 14 subtasks. In order to avoid data leakage that may arise from direct use of public datasets for evaluation, the annotations of instruction-answer pairs are all manually designed. The concise instruction design allows us to fairly compare MLLMs, instead of struggling in prompt engineering. Besides, with such an instruction, we can also easily carry out quantitative statistics. A total of 12 advanced MLLMs are comprehensively evaluated on our MME, which not only suggests that existing MLLMs still have a large room for improvement, but also reveals the potential directions for the subsequent model optimization.
    """

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = get_builder_config(VERSION)

    def _info(self):
        """Return the DatasetInfo describing this benchmark's schema and metadata."""
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "instruction": datasets.Value("string"),
                "answer": datasets.Value("string"),
                "images": datasets.Sequence(datasets.Image()),
                "related_instructions": datasets.Sequence(
                    datasets.Value(dtype="string")
                ),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the instruction and image JSON files and declare a single TRAIN split.

        `dl_manager.download_and_extract` returns local cached paths for the
        (possibly remote) files listed in `_URLS` for the selected config.
        """
        urls = _URLS[self.config.name]
        instructions_path = dl_manager.download_and_extract(urls["instructions"])
        images_path = dl_manager.download_and_extract(urls["images"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": instructions_path,
                    "images": images_path,
                },
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, images):
        """Yield (index, example) pairs from the instruction and image JSON files.

        Args:
            filepath: path to the instructions JSON; its "data" key maps
                example ids to rows with "instruction", "answer", "image_ids"
                and "rel_ins_ids" fields.
            images: path to the images JSON mapping image id -> base64 string.
        """
        # Fixed: `open` without an explicit encoding, shadowing of the builtin
        # `id` (both in the dict comprehension and the main loop), shadowing of
        # the `images` parameter by a local list, and a hand-rolled counter
        # instead of `enumerate`.
        with open(images, "r", encoding="utf-8") as f:
            raw_images = json.load(f)
        # Images are stored base64-encoded in the JSON; decode once up front.
        decoded_images = {
            img_id: base64.b64decode(img) for img_id, img in raw_images.items()
        }
        with open(filepath, "r", encoding="utf-8") as f:
            data = json.load(f)
        # The `index` key is for legacy reasons (tfds) and is not important in
        # itself, but must be unique for each example.
        for index, (example_id, row) in enumerate(data["data"].items()):
            # Silently skip image ids with no decoded image, matching the
            # original best-effort behavior.
            example_images = [
                decoded_images[img_id]
                for img_id in row["image_ids"]
                if img_id in decoded_images
            ]
            yield index, {
                "id": example_id,
                "instruction": row["instruction"],
                "answer": row["answer"],
                "images": example_images,
                "related_instructions": row["rel_ins_ids"],
            }
data/{MME.json → MME_images.json} RENAMED
File without changes