# VLMEvalKit / vlmeval / dataset / mmdocbench.py
# Uploaded to the Hugging Face Hub by Racktic (commit b5beb60, verified)
# via huggingface_hub's folder upload.
import pathlib
from vlmeval.dataset.utils.mmdocbench_eval import update_data_format, eval_mmdocbench
from .image_base import ImageBaseDataset
from ..smp import *
class MMDocBench(ImageBaseDataset):
    """Dataset wrapper for MMDocBench, a document-understanding benchmark.

    Samples are split into two top-level categories, ``Visual Perception``
    and ``Visual Reasoning``; ``evaluate`` reports per-category accuracy and
    their unweighted mean as ``Overall``.
    """

    TYPE = 'MMDoc'
    DATASET_URL = {
        'MMDocBench': ''
    }
    DATASET_MD5 = {'MMDocBench': None}

    # Metadata-only columns stripped from the result table before scoring.
    _DROP_COLS = [
        "task", "sub_task", "image_path",
        "full_prediction", "raw_question", "question",
    ]

    @classmethod
    def evaluate(cls, eval_file, **judge_kwargs):
        """Score an inference result file and write ``<stem>_score.xlsx``.

        Args:
            eval_file (str): Path to the ``.xlsx`` result file produced by
                inference; must contain ``category``, ``prediction`` and
                ``answer`` columns after ``update_data_format``.
            **judge_kwargs: Accepted for interface compatibility; unused.

        Returns:
            pd.DataFrame: One ``accuracy`` column indexed by
                ``Visual Perception`` / ``Visual Reasoning`` / ``Overall``.
        """
        data = load(eval_file)
        records = update_data_format(data)
        # Drop the metadata columns in a single call (was six drop() calls).
        records.drop(columns=cls._DROP_COLS, inplace=True)
        # Round-trip through JSON rather than .to_dict(): this converts NaN
        # to None and numpy scalars to native Python types, which the
        # `is None` check below relies on.
        records = json.loads(records.to_json(orient="records"))

        accs = {"Visual Perception": 0, "Visual Reasoning": 0}
        cnts = {"Visual Perception": 0, "Visual Reasoning": 0}
        for row in records:
            cat = row["category"]
            cnts[cat] += 1
            pred = "" if row["prediction"] is None else row["prediction"]
            accs[cat] += eval_mmdocbench(pred, row["answer"], strict=False)
        # Guard against an empty category so we never divide by zero.
        for cat in accs:
            accs[cat] = accs[cat] / cnts[cat] if cnts[cat] else 0.0
        accs["Overall"] = (accs["Visual Perception"] + accs["Visual Reasoning"]) / 2

        df_score = pd.DataFrame.from_dict(accs, orient='index', columns=['accuracy'])
        # Persist the score table next to the result file.
        df_score.to_excel(eval_file.replace('.xlsx', '_score.xlsx'), index=True)
        return df_score

    def build_prompt(self, line):
        """Build the multimodal message list (images then question) for one sample.

        Args:
            line (int | pd.Series): Row index into ``self.data`` or the row itself.

        Returns:
            list[dict]: ``{'type': 'image'|'text', 'value': ...}`` messages.
        """
        if isinstance(line, int):
            line = self.data.iloc[line]
        if self.meta_only:
            tgt_path = toliststr(line['image_path'])
        else:
            tgt_path = self.dump_image(line)
        # dump_image may yield one path or a list; normalize to a list.
        paths = tgt_path if isinstance(tgt_path, list) else [tgt_path]
        msgs = [dict(type='image', value=p) for p in paths]
        msgs.append(dict(type='text', value=line['question']))
        return msgs