|
|
import argparse |
|
|
import itertools |
|
|
import json |
|
|
import os |
|
|
import re |
|
|
from functools import partial |
|
|
from pathlib import Path |
|
|
import numpy as np |
|
|
import torch |
|
|
from qwen_vl_utils import process_vision_info |
|
|
from torchvision.ops.boxes import box_area |
|
|
from tqdm import tqdm |
|
|
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration |
|
|
import cv2 |
|
|
import pandas as pd |
|
|
import random |
|
|
|
|
|
import datetime |
|
|
from num2words import num2words |
|
|
import ast |
|
|
import warnings |
|
|
import csv |
|
|
|
|
|
CAPABILITY_NAME = 'category'


def update_data_format(df_data):
    """Normalize a raw benchmark results frame into the reporting schema.

    Transformations applied:
      * rename column 'capability' -> CAPABILITY_NAME ('category');
      * if present, move 'dataset_name' into 'sub_task' (mapping the
        'OCR-VQA' dataset to the 'BookOCR' label);
      * collapse every category other than 'Visual Reasoning' into
        'Visual Perception';
      * normalize 'reasoning_type' (NaN -> '', 'algebraic' -> 'arithmetic');
      * split the combined 'Text Localization *' tasks into sub-tasks and a
        single 'Text Localization' task label;
      * for reasoning rows, derive the task label from the reasoning type
        (e.g. 'arithmetic' -> 'Arithmetic Reasoning') and drop the
        'reasoning_type' column from those rows.

    Parameters
    ----------
    df_data : pandas.DataFrame
        Must contain CAPABILITY_NAME (as 'capability'), 'reasoning_type' and
        'task' columns; 'dataset_name' is optional.

    Returns
    -------
    pandas.DataFrame
        Perception rows followed by reasoning rows, index reset.
    """
    df_data = df_data.rename(columns={'capability': CAPABILITY_NAME})

    if 'dataset_name' in df_data.columns:
        df_data['sub_task'] = df_data['dataset_name']
        df_data.drop(columns=['dataset_name'], inplace=True)
        df_data.loc[df_data['sub_task'] == 'OCR-VQA', 'sub_task'] = 'BookOCR'

    # Everything that is not Visual Reasoning counts as Visual Perception.
    df_data.loc[df_data[CAPABILITY_NAME] != 'Visual Reasoning', CAPABILITY_NAME] = 'Visual Perception'

    df_data['reasoning_type'] = df_data['reasoning_type'].fillna('')
    df_data.loc[df_data['reasoning_type'] == 'algebraic', 'reasoning_type'] = 'arithmetic'

    # Split the combined Text Localization tasks into distinct sub-tasks,
    # then unify the task label itself.
    df_data.loc[df_data['task'] == 'Text Localization Bbox2Text', 'sub_task'] = 'Bbox2Text'
    df_data.loc[df_data['task'] == 'Text Localization Text2Bbox', 'sub_task'] = 'Text2Bbox'
    df_data['task'] = df_data['task'].replace({
        'Text Localization Bbox2Text': 'Text Localization',
        'Text Localization Text2Bbox': 'Text Localization',
    })

    df_perception = df_data[df_data[CAPABILITY_NAME] == 'Visual Perception']
    df_reasoning = df_data[df_data[CAPABILITY_NAME] == 'Visual Reasoning'].copy()

    # Reasoning rows use the (title-cased) reasoning type as their task label.
    df_reasoning['task'] = df_reasoning['reasoning_type'].str.title()
    df_reasoning['task'] = df_reasoning['task'].apply(
        lambda x: x + ' Reasoning' if x in ['Arithmetic', 'Logical', 'Spatial'] else x
    )
    df_reasoning = df_reasoning.drop(columns=['reasoning_type'])

    # BUGFIX: removed the unreachable `return df_data` that followed this return.
    return pd.concat([df_perception, df_reasoning], ignore_index=True)
|
|
|
|
|
answer_pattern = re.compile(r'<answer>(.*?)</answer>')
markdown_json_pattern = re.compile(r'```json(.*?)```', re.DOTALL)


def process_answer(answer):
    """Extract the final answer string from raw model output.

    Applies, in order: text after the last '### Final Answer ###' header,
    text after the last 'Answer:' marker, the last <answer>...</answer> tag,
    the last ```json ...``` fenced block, and finally — if the remainder
    parses as a JSON object — its "answer" field.

    Parameters
    ----------
    answer : str
        Raw generated text.

    Returns
    -------
    str
        The extracted answer, coerced to str.
    """
    if '### Final Answer ###' in answer:
        answer = answer.split('### Final Answer ###')[-1].strip()
    if 'Answer:' in answer:
        answer = answer.split('Answer:')[-1].strip()

    matches = answer_pattern.findall(answer)
    if matches:
        answer = matches[-1]
    matches = markdown_json_pattern.findall(answer)
    if matches:
        answer = matches[-1]

    try:
        # If the remainder is a JSON object, prefer its "answer" field.
        answer = json.loads(answer)["answer"]
    except (ValueError, TypeError, KeyError):
        # BUGFIX: was a bare `except:` that also swallowed KeyboardInterrupt.
        # ValueError covers json.JSONDecodeError; TypeError covers JSON that
        # is not a dict; KeyError covers a dict with no "answer" field.
        pass
    return str(answer)
|
|
|
|
|
class NoMatchedEquationError(Exception):
    """Raised when an answer string cannot be evaluated as an equation."""
|
|
|
|
|
|
|
|
def calculate(expression):
    """Evaluate the left-hand side of an arithmetic expression string.

    `expression` may look like "3+4=7"; only the part before the first '='
    is evaluated as a Python expression.

    Returns
    -------
    The evaluated result, or None when the text does not parse or evaluate.

    NOTE(security): this still runs eval() on model-produced text. The input
    is model output rather than end-user input, but a hardened AST-whitelist
    evaluator would be safer — flagged for follow-up, not changed here to
    preserve behavior.
    """
    try:
        lhs = expression.split('=')[0].strip()
        tree = ast.parse(lhs, mode='eval')
        return eval(compile(tree, '<string>', 'eval'))
    except Exception:
        # BUGFIX: was a bare `except:` that also trapped SystemExit and
        # KeyboardInterrupt; Exception still covers SyntaxError, NameError, etc.
        return None
|
|
|
|
|
def extract_and_judge(expression, target_result):
    """Evaluate `expression` (e.g. "3 + 4 = 7") and check the target value.

    Spaces and '$' are stripped before evaluation. The check is substring
    containment: `target_result` must appear in the string form of the
    computed value.

    Raises
    ------
    NoMatchedEquationError
        If the expression cannot be evaluated (calculate() returned None).
    """
    expression = expression.replace(" ", "").replace("$", "")
    result = calculate(expression)
    if result is None:
        # BUGFIX: was `f"none error"` — a placeholder-free f-string with an
        # uninformative message.
        raise NoMatchedEquationError(f"could not evaluate expression: {expression!r}")
    return target_result.strip() in str(result).strip()
|
|
|
|
|
# Formatting characters stripped before numeric parsing, and unit/direction
# words stripped from answer tokens (mirrors the original replace() chains).
_NUM_STRIP = str.maketrans('', '', ',$*@- %')
_NUM_WORDS = ('million', 'increase', 'decrease')


def _normalize_numeric(text, drop_words=False):
    """Strip formatting chars (and optionally unit words) from a number string."""
    text = text.translate(_NUM_STRIP)
    if drop_words:
        for word in _NUM_WORDS:
            text = text.replace(word, '')
    return text


def check_relation(gt, ans, strict=False):
    """Heuristically decide whether answer `ans` matches ground truth `gt`.

    Tries a cascade of matchers: case-insensitive exact match, per-token
    integer/float match, whole-string numeric equality, equation evaluation
    (via extract_and_judge), percentage-aware numeric equality, number vs.
    spelled-out words (num2words), and finally substring containment.

    Parameters
    ----------
    gt, ans : any
        Coerced to str; ground truth and candidate answer.
    strict : bool
        When True, allow far less length difference between ans and gt
        (25 chars instead of 200) before rejecting outright.

    Returns
    -------
    bool
    """
    # Length pre-filter.
    # BUGFIX: the original compared against a hard-coded 200, so `strict`
    # computed delta_len = 25 but never used it — strict mode was a no-op.
    delta_len = 25 if strict else 200
    if abs(len(str(ans)) - len(str(gt))) > delta_len:
        return False

    gt = str(gt).strip()
    ans = str(ans).strip()

    # 1. Case-insensitive exact match.
    if gt.lower() == ans.lower():
        return True

    tokens = ans.split(" ")

    # 2. Integer match: some token of a short answer equals the gt integer.
    # BUGFIX: the original wrapped the whole token loop in one try, so a
    # single non-numeric token ("The total is 42") aborted the entire scan;
    # unparseable tokens are now skipped individually.
    try:
        gt_int = int(_normalize_numeric(gt))
    except Exception:
        gt_int = None
    if gt_int is not None and len(tokens) < 5:
        for tok in tokens:
            try:
                if gt_int == int(_normalize_numeric(tok, drop_words=True)):
                    return True
            except Exception:
                continue

    # 3. Float match (4-decimal rounding), only for very short answers.
    try:
        gt_float = float(_normalize_numeric(gt))
    except Exception:
        gt_float = None
    if gt_float is not None and len(tokens) < 3:
        for tok in tokens:
            try:
                if round(gt_float, 4) == round(float(_normalize_numeric(tok, drop_words=True)), 4):
                    return True
            except Exception:
                continue

    # 4. Whole-string numeric equality (commas and '$' stripped). This is
    # decisive when both sides parse: unequal numbers return False here.
    try:
        a_f = float(ans.replace(",", "").replace("$", ""))
        g_f = float(gt.replace(",", "").replace("$", ""))
        return a_f == g_f
    except Exception:
        pass

    # 5. Treat the answer as an arithmetic expression ("3+4=7") and judge it.
    # except Exception deliberately also covers the NoMatchedEquationError
    # raised on unevaluable text — best-effort, like the original bare except.
    try:
        return extract_and_judge(ans, gt)
    except Exception:
        pass

    # 6. Percentage-aware numeric equality ("50%" vs "0.5").
    try:
        num_gt = float(gt.lstrip('$').replace(',', '').rstrip('%')) / 100 if "%" in gt else float(
            gt.lstrip('$').replace(',', ''))
        num_ans = float(ans.lstrip('$').replace(',', '').rstrip('%')) / 100 if "%" in ans else float(
            ans.lstrip('$').replace(',', ''))
        return num_gt == num_ans
    except ValueError:
        pass

    # 7. Numeric gt vs. spelled-out answer ("7" vs "seven").
    try:
        num_gt = float(gt.lstrip('$').replace(',', '').rstrip('%')) / 100 if "%" in gt else float(
            gt.lstrip('$').replace(',', ''))
        return num2words(int(num_gt)).lower() == ans.lower()
    except Exception:
        # Broadened from ValueError: int(inf) raises OverflowError, and
        # num2words itself can fail — keep this matcher best-effort.
        pass

    # 8. Substring containment: gt inside ans, provided the leftover text is
    # not a different non-zero number (e.g. gt "12" inside ans "120").
    if gt.lower() in ans.lower():
        try:
            leftover = int(ans.lower().replace(gt.lower(), "").replace(" ", ""))
            return leftover == 0
        except Exception:
            # Leftover is non-numeric prose (or empty) — accept the match.
            return True

    # No heuristic matched. (The original fell off the end returning None;
    # an explicit False keeps the truthiness contract and is clearer.)
    return False
|
|
|
|
|
def eval_mmdocbench(ans, gt_obj, strict=False):
    """Score a model answer against MMDocBench-style ground truth.

    Parameters
    ----------
    ans : str
        Raw model output; normalized via process_answer().
    gt_obj : str
        JSON-encoded list of ground-truth dicts, each carrying an "answer"
        key. Some records wrap the list one level deeper ([[{...}, ...]]).
    strict : bool
        Forwarded to check_relation() (tighter length pre-filter).

    Returns
    -------
    float
        Fraction of non-empty ground-truth answers that `ans` matches
        (0.0 when there are no non-empty answers).
    """
    ans = process_answer(ans)

    gt_entries = json.loads(gt_obj)
    # Unwrap the extra nesting level some records use.
    if isinstance(gt_entries[0], list):  # BUGFIX: was `type(...) == list`
        gt_entries = gt_entries[0]
    gt_answers = [item["answer"] for item in gt_entries]

    matched = 0
    total = 0
    for gt in gt_answers:
        if gt != "":
            total += 1
            if check_relation(gt, ans, strict):
                matched += 1

    # BUGFIX: the original divided unconditionally and raised
    # ZeroDivisionError when every ground-truth answer was empty.
    if total == 0:
        return 0.0
    return matched / total