Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- VLMEvalKit-sudoku/vlmeval/dataset/EgoExoBench/__pycache__/__init__.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/EgoExoBench/egoexobench.py +301 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/Ocrbench_v2/page_ocr_metric.py +51 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/Ocrbench_v2/parallel.py +51 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/Ocrbench_v2/requirements.txt +13 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/Ocrbench_v2/spotting_eval/script.py +451 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/Ocrbench_v2/spotting_metric.py +185 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/Ocrbench_v2/vqa_metric.py +282 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/cgbench.py +620 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/eval_configs/__init__.py +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/eval_configs/__pycache__/__init__.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/eval_configs/global_config.py +61 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/__pycache__/__init__.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/__pycache__/chart_type_evaluator.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/__pycache__/color_evaluator.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/__pycache__/layout_evaluator.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/chart_type_evaluator_prefix.py +372 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/color_evaluator.py +326 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/color_evaluator_prefix.py +835 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/color_utils.py +85 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/grid_evaluator.py +181 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/layout_evaluator.py +166 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/legend_evaluator.py +194 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/text_evaluator.py +202 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/crpe.py +13 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/mathverse.py +193 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/__pycache__/utils.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/__pycache__/answer_str_parse.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/__pycache__/dummy_parse.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/__pycache__/json_parse.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/common/__pycache__/parsers.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/common/__pycache__/utils.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/common/parsers.py +145 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/common/utils.py +138 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/ascii_art_gpt4o_judge.py +126 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/common/conversions.py +244 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/common/metrics.py +102 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/common/transformations.py +120 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/dict_equality.py +44 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/dict_exact_match_agg_recall.py +27 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/general_numerical_match.py +253 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/longest_common_list_prefix_ratio.py +15 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/mse.py +64 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/multi_ref_phrase.py +26 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/nbbox_iou.py +104 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/near_str_match.py +23 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/number_rel_diff_ratio.py +22 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/positive_int_match.py +31 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/set_precision.py +16 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/symbolic_planning.py +266 -0
VLMEvalKit-sudoku/vlmeval/dataset/EgoExoBench/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (211 Bytes). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/dataset/EgoExoBench/egoexobench.py
ADDED
|
@@ -0,0 +1,301 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import huggingface_hub
|
| 2 |
+
from huggingface_hub import snapshot_download
|
| 3 |
+
from ...smp import *
|
| 4 |
+
from ..video_base import VideoBaseDataset
|
| 5 |
+
from ..utils import build_judge, DEBUG_MESSAGE
|
| 6 |
+
import torchvision.transforms as T
|
| 7 |
+
from torchvision import transforms
|
| 8 |
+
import pandas as pd
|
| 9 |
+
import os
|
| 10 |
+
import re
|
| 11 |
+
from .utils import *
|
| 12 |
+
import torch
|
| 13 |
+
import shutil
|
| 14 |
+
import glob
|
| 15 |
+
|
| 16 |
+
FAIL_MSG = 'Failed to obtain answer via API.'
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class EgoExoBench_MCQ(VideoBaseDataset):
|
| 20 |
+
MD5 = '9c0aa8da235d766d02dd7e9a19182719'
|
| 21 |
+
TYPE = 'Video-MCQ'
|
| 22 |
+
|
| 23 |
+
def __init__(self, dataset='EgoExoBench_MCQ', nframe=64, skip_EgoExo4D=False):
|
| 24 |
+
super().__init__(dataset=dataset, nframe=nframe)
|
| 25 |
+
self.frame_fps = 2
|
| 26 |
+
self.skip_EgoExo4D = skip_EgoExo4D
|
| 27 |
+
|
| 28 |
+
@classmethod
|
| 29 |
+
def supported_datasets(cls):
|
| 30 |
+
return ['EgoExoBench_MCQ']
|
| 31 |
+
|
| 32 |
+
def prepare_dataset(self, dataset_name='EgoExoBench_MCQ', repo_id='Heleun/EgoExoBench_MCQ', video_repo_id='onlyfaces/EgoExoBench'): # noqa: E501
|
| 33 |
+
def check_integrity(pth):
|
| 34 |
+
data_file = osp.join(pth, f'{dataset_name}.tsv')
|
| 35 |
+
|
| 36 |
+
if not osp.exists(data_file):
|
| 37 |
+
return False
|
| 38 |
+
|
| 39 |
+
if md5(data_file) != self.MD5:
|
| 40 |
+
return False
|
| 41 |
+
|
| 42 |
+
return True
|
| 43 |
+
cache_path = get_cache_path(repo_id)
|
| 44 |
+
self.video_root = os.path.join(LMUDataRoot(), 'videos', 'EgoExoBench')
|
| 45 |
+
os.makedirs(self.video_root, exist_ok=True)
|
| 46 |
+
if not osp.exists(osp.join(self.video_root, 'processed_videos')) or not osp.exists(osp.join(self.video_root, 'processed_frames')): # noqa: E501
|
| 47 |
+
snapshot_download(
|
| 48 |
+
repo_id=video_repo_id,
|
| 49 |
+
repo_type='dataset',
|
| 50 |
+
allow_patterns=['*.tar.gz.part*'],
|
| 51 |
+
local_dir=self.video_root
|
| 52 |
+
)
|
| 53 |
+
|
| 54 |
+
def combine_and_extract(root_dir, prefix, remove_parts=True):
|
| 55 |
+
parts_pattern = osp.join(root_dir, f'{prefix}.tar.gz.part*')
|
| 56 |
+
combined_archive = osp.join(root_dir, f'{prefix}.tar.gz')
|
| 57 |
+
if not osp.exists(combined_archive):
|
| 58 |
+
parts = sorted(glob.glob(parts_pattern))
|
| 59 |
+
with open(combined_archive, 'wb') as outfile:
|
| 60 |
+
for part in parts:
|
| 61 |
+
with open(part, 'rb') as infile:
|
| 62 |
+
shutil.copyfileobj(infile, outfile)
|
| 63 |
+
shutil.unpack_archive(combined_archive, root_dir)
|
| 64 |
+
if remove_parts:
|
| 65 |
+
for part in parts:
|
| 66 |
+
os.remove(part)
|
| 67 |
+
os.remove(combined_archive)
|
| 68 |
+
|
| 69 |
+
combine_and_extract(self.video_root, 'processed_videos')
|
| 70 |
+
combine_and_extract(self.video_root, 'processed_frames')
|
| 71 |
+
|
| 72 |
+
if cache_path is not None and check_integrity(cache_path):
|
| 73 |
+
dataset_path = cache_path
|
| 74 |
+
else:
|
| 75 |
+
dataset_path = snapshot_download(repo_id=repo_id, repo_type='dataset')
|
| 76 |
+
|
| 77 |
+
data_file = osp.join(dataset_path, f'{dataset_name}.tsv')
|
| 78 |
+
|
| 79 |
+
# transform
|
| 80 |
+
self.transform = T.Compose([
|
| 81 |
+
Stack(),
|
| 82 |
+
ToTorchFormatTensor()
|
| 83 |
+
])
|
| 84 |
+
|
| 85 |
+
return dict(root=dataset_path, data_file=data_file)
|
| 86 |
+
|
| 87 |
+
def get_index(self, bound, fps, max_frame, first_idx=0, num_segments=16):
|
| 88 |
+
start, end = bound if bound else (-100000, 100000)
|
| 89 |
+
start_idx = max(first_idx, round(start * fps))
|
| 90 |
+
end_idx = min(round(end * fps), max_frame)
|
| 91 |
+
seg_size = (end_idx - start_idx) / num_segments
|
| 92 |
+
mid_seg_size = seg_size / 2
|
| 93 |
+
indices = np.arange(num_segments)
|
| 94 |
+
frame_indices = start_idx + mid_seg_size + np.round(seg_size * indices)
|
| 95 |
+
return frame_indices.astype(int)
|
| 96 |
+
|
| 97 |
+
def load_into_video_and_process(self, media, mcq_idx):
|
| 98 |
+
try:
|
| 99 |
+
from moviepy.editor import VideoFileClip, ImageSequenceClip
|
| 100 |
+
except:
|
| 101 |
+
raise ImportError(
|
| 102 |
+
'MoviePy is not installed, please install it by running "pip install moviepy==1.0.3"'
|
| 103 |
+
)
|
| 104 |
+
video_root = self.video_root
|
| 105 |
+
if media['type'] in ['image']:
|
| 106 |
+
original_image_path = osp.join(video_root, media['image_paths'][0])
|
| 107 |
+
processed_video_path = osp.join(video_root, 'processed_videos', f'{mcq_idx}.jpg')
|
| 108 |
+
if not os.path.exists(processed_video_path):
|
| 109 |
+
shutil.copy(original_image_path, processed_video_path)
|
| 110 |
+
return dict(type='image', value=processed_video_path)
|
| 111 |
+
elif media['type'] in ['frames']:
|
| 112 |
+
input_images = [osp.join(video_root, im) for im in media['image_paths']]
|
| 113 |
+
processed_video_path = osp.join(video_root, 'processed_videos', f'{mcq_idx}.mp4')
|
| 114 |
+
media['nframes'] = len(input_images) // 2 * 2
|
| 115 |
+
if not os.path.exists(processed_video_path):
|
| 116 |
+
# using MoviePy to transform images into mp4
|
| 117 |
+
image_files = sorted(input_images)
|
| 118 |
+
image_clip = ImageSequenceClip(image_files, fps=self.frame_fps)
|
| 119 |
+
image_clip.write_videofile(processed_video_path, codec='libx264')
|
| 120 |
+
image_clip.close()
|
| 121 |
+
elif media['type'] in ['video']:
|
| 122 |
+
original_video_path = osp.join(video_root, media['video_path'])
|
| 123 |
+
processed_video_path = osp.join(video_root, 'processed_videos', f'{mcq_idx}.mp4')
|
| 124 |
+
if 'video_start' in media and 'video_end' in media and media['video_start'] is not None and media['video_end'] is not None: # noqa: E501
|
| 125 |
+
video_start, video_end = media['video_start'], media['video_end']
|
| 126 |
+
if not os.path.exists(processed_video_path):
|
| 127 |
+
video_clip = VideoFileClip(original_video_path)
|
| 128 |
+
clip = video_clip.subclip(video_start, min(video_end, video_clip.duration))
|
| 129 |
+
clip.write_videofile(processed_video_path)
|
| 130 |
+
clip.close()
|
| 131 |
+
else:
|
| 132 |
+
if not os.path.exists(processed_video_path):
|
| 133 |
+
shutil.copy(original_video_path, processed_video_path)
|
| 134 |
+
else:
|
| 135 |
+
raise ValueError(f"Unsupported media type: {media['type']}")
|
| 136 |
+
|
| 137 |
+
return dict(type='video', value=processed_video_path, nframes=media.get('nframes', 8))
|
| 138 |
+
|
| 139 |
+
def save_video_into_images(self, media, mcq_idx):
|
| 140 |
+
bound = None
|
| 141 |
+
video_root = self.video_root
|
| 142 |
+
|
| 143 |
+
if media['type'] in ['frames', 'image']:
|
| 144 |
+
media_paths = [osp.join(video_root, im) for im in media['image_paths']]
|
| 145 |
+
save_dir = osp.join(video_root, 'processed_frames', str(mcq_idx))
|
| 146 |
+
os.makedirs(save_dir, exist_ok=True)
|
| 147 |
+
input_images = []
|
| 148 |
+
for media_path in media_paths:
|
| 149 |
+
img_path = media_path.split('/')[-1]
|
| 150 |
+
save_image_path = osp.join(save_dir, img_path)
|
| 151 |
+
shutil.copy(media_path, save_image_path)
|
| 152 |
+
input_images.append(save_image_path)
|
| 153 |
+
return input_images
|
| 154 |
+
|
| 155 |
+
if 'video_start' in media and 'video_end' in media and media['video_start'] is not None and media['video_end'] is not None: # noqa: E501
|
| 156 |
+
bound = (
|
| 157 |
+
media['video_start'], media['video_end']
|
| 158 |
+
)
|
| 159 |
+
video_path = os.path.join(video_root, media['video_path'])
|
| 160 |
+
|
| 161 |
+
def read_video(video_path, bound=None, num_segments=16):
|
| 162 |
+
from decord import VideoReader, cpu
|
| 163 |
+
vr = VideoReader(video_path, ctx=cpu(0), num_threads=1)
|
| 164 |
+
max_frame = len(vr) - 1
|
| 165 |
+
fps = float(vr.get_avg_fps())
|
| 166 |
+
|
| 167 |
+
images_group = list()
|
| 168 |
+
frame_indices = self.get_index(bound, fps, max_frame, first_idx=0, num_segments=num_segments)
|
| 169 |
+
save_dir = osp.join(video_root, 'processed_frames', str(mcq_idx))
|
| 170 |
+
|
| 171 |
+
if osp.exists(save_dir) and len(os.listdir(save_dir)) > 0:
|
| 172 |
+
return None, frame_indices
|
| 173 |
+
|
| 174 |
+
for frame_index in frame_indices:
|
| 175 |
+
img = Image.fromarray(vr[frame_index].asnumpy())
|
| 176 |
+
images_group.append(img)
|
| 177 |
+
torch_imgs = self.transform(images_group)
|
| 178 |
+
return torch_imgs, frame_indices
|
| 179 |
+
|
| 180 |
+
def save_video_frames(imgs, video_root, frame_indices, mcq_idx):
|
| 181 |
+
save_dir = osp.join(video_root, 'processed_frames', str(mcq_idx))
|
| 182 |
+
os.makedirs(save_dir, exist_ok=True)
|
| 183 |
+
frame_paths = [osp.join(save_dir, f'{fidx:07d}.jpg') for fidx in frame_indices]
|
| 184 |
+
|
| 185 |
+
flag = np.all([osp.exists(pth) for pth in frame_paths])
|
| 186 |
+
|
| 187 |
+
if not flag:
|
| 188 |
+
block_size = imgs.size(0) // len(frame_indices)
|
| 189 |
+
split_tensors = torch.split(imgs, block_size)
|
| 190 |
+
to_pil = transforms.ToPILImage()
|
| 191 |
+
images = [to_pil(arr) for arr in split_tensors]
|
| 192 |
+
for im, pth in zip(images, frame_paths):
|
| 193 |
+
if not osp.exists(pth):
|
| 194 |
+
im.save(pth)
|
| 195 |
+
|
| 196 |
+
return frame_paths
|
| 197 |
+
|
| 198 |
+
torch_imgs, frame_indices = read_video(video_path, bound, media['nframes'])
|
| 199 |
+
img_frame_paths = save_video_frames(torch_imgs, video_root, frame_indices, mcq_idx)
|
| 200 |
+
return img_frame_paths
|
| 201 |
+
|
| 202 |
+
def process_text_and_media(self, text, media_list, video_llm, mcq_idx):
|
| 203 |
+
|
| 204 |
+
message = []
|
| 205 |
+
chunks = re.split(r'(<image>|<video>)', text)
|
| 206 |
+
media_index = 0
|
| 207 |
+
media_list = eval(media_list)
|
| 208 |
+
|
| 209 |
+
placeholder_count = sum(1 for chunk in chunks if chunk in ['<image>', '<video>'])
|
| 210 |
+
assert placeholder_count == len(media_list), \
|
| 211 |
+
f"Placeholder count {placeholder_count} does not match media list length {len(media_list)}."
|
| 212 |
+
|
| 213 |
+
for chunk in chunks:
|
| 214 |
+
if chunk in ['<image>', '<video>']:
|
| 215 |
+
if video_llm:
|
| 216 |
+
media_content = self.load_into_video_and_process(media_list[media_index], f'question{mcq_idx}_video{media_index}') # noqa: E501
|
| 217 |
+
message.append(media_content)
|
| 218 |
+
else:
|
| 219 |
+
# Save the video as individual image frames for processing
|
| 220 |
+
img_frame_paths = self.save_video_into_images(media_list[media_index], f'question{mcq_idx}_video{media_index}') # noqa: E501
|
| 221 |
+
for im in img_frame_paths:
|
| 222 |
+
message.append(dict(type='image', value=im))
|
| 223 |
+
|
| 224 |
+
media_index += 1
|
| 225 |
+
elif chunk.strip():
|
| 226 |
+
message.append(dict(type='text', value=chunk.strip()))
|
| 227 |
+
|
| 228 |
+
return message
|
| 229 |
+
|
| 230 |
+
def build_prompt(self, line, video_llm):
|
| 231 |
+
if isinstance(line, int):
|
| 232 |
+
mcq_idx = line
|
| 233 |
+
assert line < len(self)
|
| 234 |
+
line = self.data.iloc[line]
|
| 235 |
+
mcq_idx = int(line['index'])
|
| 236 |
+
if self.skip_EgoExo4D and 'EgoExo4D' in line['medias']:
|
| 237 |
+
return None
|
| 238 |
+
text = line['question'] + '\nOptions:\n' + line['options'] + '\n' + line['response_format']
|
| 239 |
+
message = self.process_text_and_media(text, line['medias'], video_llm, mcq_idx)
|
| 240 |
+
return message
|
| 241 |
+
|
| 242 |
+
# It returns a dictionary
|
| 243 |
+
@classmethod
|
| 244 |
+
def evaluate(self, eval_file, **judge_kwargs):
|
| 245 |
+
from .utils import get_dimension_rating, extract_characters_regex, extract_option
|
| 246 |
+
|
| 247 |
+
assert get_file_extension(eval_file) in ['xlsx', 'json', 'tsv'], \
|
| 248 |
+
'data file should be an supported format (xlsx/json/tsv) file'
|
| 249 |
+
|
| 250 |
+
tmp_file = get_intermediate_file_path(eval_file, '_tmp', 'pkl')
|
| 251 |
+
tgt_file = get_intermediate_file_path(eval_file, '_rating', 'json')
|
| 252 |
+
score_file = get_intermediate_file_path(eval_file, '_score', 'csv')
|
| 253 |
+
|
| 254 |
+
if not osp.exists(score_file):
|
| 255 |
+
model = judge_kwargs.get('model', 'exact_matching')
|
| 256 |
+
assert model in ['chatgpt-0125', 'exact_matching', 'gpt-4-0125']
|
| 257 |
+
|
| 258 |
+
if model == 'exact_matching':
|
| 259 |
+
model = None
|
| 260 |
+
elif gpt_key_set():
|
| 261 |
+
model = build_judge(**judge_kwargs)
|
| 262 |
+
if not model.working():
|
| 263 |
+
warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation')
|
| 264 |
+
warnings.warn(DEBUG_MESSAGE)
|
| 265 |
+
model = None
|
| 266 |
+
else:
|
| 267 |
+
warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation')
|
| 268 |
+
model = None
|
| 269 |
+
res = {} if not osp.exists(tmp_file) else load(tmp_file)
|
| 270 |
+
res = {k: v for k, v in res.items() if FAIL_MSG not in v}
|
| 271 |
+
|
| 272 |
+
data = load(eval_file)
|
| 273 |
+
data_un = data[~pd.isna(data['prediction'])]
|
| 274 |
+
|
| 275 |
+
for idx in data['index']:
|
| 276 |
+
ans = data.loc[data['index'] == idx, 'answer'].values[0]
|
| 277 |
+
pred = data.loc[data['index'] == idx, 'prediction'].values[0]
|
| 278 |
+
|
| 279 |
+
if extract_characters_regex(pred) == '':
|
| 280 |
+
extract_pred = extract_option(
|
| 281 |
+
model,
|
| 282 |
+
data.loc[data['index'] == idx].to_dict(orient='records')[0],
|
| 283 |
+
'EgoExoBench_MCQ',
|
| 284 |
+
)
|
| 285 |
+
data.loc[idx, 'score'] = int(extract_pred == ans)
|
| 286 |
+
else:
|
| 287 |
+
data.loc[idx, 'score'] = int(extract_characters_regex(pred) == ans)
|
| 288 |
+
|
| 289 |
+
rejected = [x for x in data['score'] if x == -1]
|
| 290 |
+
|
| 291 |
+
print(
|
| 292 |
+
f'Among {len(data)} questions, failed to obtain prediction for {len(data) - len(data_un)} questions, '
|
| 293 |
+
f'failed to obtain the score for another {len(rejected)} questions. '
|
| 294 |
+
f'Those questions will be counted as -1 score in ALL rating, and will not be counted in VALID rating.'
|
| 295 |
+
)
|
| 296 |
+
|
| 297 |
+
dump(data, score_file)
|
| 298 |
+
|
| 299 |
+
rating = get_dimension_rating(score_file)
|
| 300 |
+
dump(rating, tgt_file)
|
| 301 |
+
return rating
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/Ocrbench_v2/page_ocr_metric.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import argparse
|
| 3 |
+
import nltk
|
| 4 |
+
from nltk.metrics import precision, recall, f_measure
|
| 5 |
+
import numpy as np
|
| 6 |
+
import jieba
|
| 7 |
+
import re
|
| 8 |
+
from nltk.translate import meteor_score
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def contain_chinese_string(text):
|
| 12 |
+
chinese_pattern = re.compile(r'[\u4e00-\u9fa5]')
|
| 13 |
+
return bool(chinese_pattern.search(text))
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def cal_per_metrics(pred, gt):
|
| 17 |
+
metrics = {}
|
| 18 |
+
|
| 19 |
+
if contain_chinese_string(gt) or contain_chinese_string(pred):
|
| 20 |
+
reference = jieba.lcut(gt)
|
| 21 |
+
hypothesis = jieba.lcut(pred)
|
| 22 |
+
else:
|
| 23 |
+
reference = gt.split()
|
| 24 |
+
hypothesis = pred.split()
|
| 25 |
+
|
| 26 |
+
metrics["bleu"] = nltk.translate.bleu([reference], hypothesis)
|
| 27 |
+
metrics["meteor"] = meteor_score.meteor_score([reference], hypothesis)
|
| 28 |
+
|
| 29 |
+
reference = set(reference)
|
| 30 |
+
hypothesis = set(hypothesis)
|
| 31 |
+
metrics["f_measure"] = f_measure(reference, hypothesis)
|
| 32 |
+
|
| 33 |
+
metrics["precision"] = precision(reference, hypothesis)
|
| 34 |
+
metrics["recall"] = recall(reference, hypothesis)
|
| 35 |
+
metrics["edit_dist"] = nltk.edit_distance(pred, gt) / max(len(pred), len(gt))
|
| 36 |
+
return metrics
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
if __name__ == "__main__":
|
| 40 |
+
|
| 41 |
+
# Examples for region text recognition and read all text tasks
|
| 42 |
+
predict_text = "metrics['edit_dist'] = nltk.edit_distance(pred, gt) / max(len(pred), len(gt))"
|
| 43 |
+
true_text = "metrics = nltk.edit_distance(pred, gt) / max(len(pred), len(gt))"
|
| 44 |
+
|
| 45 |
+
scores = cal_per_metrics(predict_text, true_text)
|
| 46 |
+
|
| 47 |
+
predict_text = "metrics['edit_dist'] len(gt))"
|
| 48 |
+
true_text = "metrics = nltk.edit_distance(pred, gt) / max(len(pred), len(gt))"
|
| 49 |
+
|
| 50 |
+
scores = cal_per_metrics(predict_text, true_text)
|
| 51 |
+
print(scores)
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/Ocrbench_v2/parallel.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from tqdm import tqdm
|
| 2 |
+
from concurrent.futures import ProcessPoolExecutor, as_completed
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def parallel_process(array, function, n_jobs=16, use_kwargs=False, front_num=0):
|
| 6 |
+
"""
|
| 7 |
+
A parallel version of the map function with a progress bar.
|
| 8 |
+
|
| 9 |
+
Args:
|
| 10 |
+
array (array-like): An array to iterate over.
|
| 11 |
+
function (function): A python function to apply to the elements of array
|
| 12 |
+
n_jobs (int, default=16): The number of cores to use
|
| 13 |
+
use_kwargs (boolean, default=False): Whether to consider the elements of array as dictionaries of
|
| 14 |
+
keyword arguments to function
|
| 15 |
+
front_num (int, default=3): The number of iterations to run serially before kicking off the parallel job.
|
| 16 |
+
Useful for catching bugs
|
| 17 |
+
Returns:
|
| 18 |
+
[function(array[0]), function(array[1]), ...]
|
| 19 |
+
"""
|
| 20 |
+
# We run the first few iterations serially to catch bugs
|
| 21 |
+
if front_num > 0:
|
| 22 |
+
front = [function(**a) if use_kwargs else function(a) for a in array[:front_num]]
|
| 23 |
+
else:
|
| 24 |
+
front = []
|
| 25 |
+
# If we set n_jobs to 1, just run a list comprehension. This is useful for benchmarking and debugging.
|
| 26 |
+
if n_jobs == 1:
|
| 27 |
+
return front + [function(**a) if use_kwargs else function(a) for a in tqdm(array[front_num:])]
|
| 28 |
+
# Assemble the workers
|
| 29 |
+
with ProcessPoolExecutor(max_workers=n_jobs) as pool:
|
| 30 |
+
# Pass the elements of array into function
|
| 31 |
+
if use_kwargs:
|
| 32 |
+
futures = [pool.submit(function, **a) for a in array[front_num:]]
|
| 33 |
+
else:
|
| 34 |
+
futures = [pool.submit(function, a) for a in array[front_num:]]
|
| 35 |
+
kwargs = {
|
| 36 |
+
'total': len(futures),
|
| 37 |
+
'unit': 'it',
|
| 38 |
+
'unit_scale': True,
|
| 39 |
+
'leave': True
|
| 40 |
+
}
|
| 41 |
+
# Print out the progress as tasks complete
|
| 42 |
+
for f in tqdm(as_completed(futures), **kwargs):
|
| 43 |
+
pass
|
| 44 |
+
out = []
|
| 45 |
+
# Get the results from the futures.
|
| 46 |
+
for i, future in tqdm(enumerate(futures)):
|
| 47 |
+
try:
|
| 48 |
+
out.append(future.result())
|
| 49 |
+
except Exception as e:
|
| 50 |
+
out.append(e)
|
| 51 |
+
return front + out
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/Ocrbench_v2/requirements.txt
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
apted
|
| 2 |
+
distance
|
| 3 |
+
distance
|
| 4 |
+
editdistance
|
| 5 |
+
ipdb
|
| 6 |
+
jieba
|
| 7 |
+
Levenshtein
|
| 8 |
+
lxml
|
| 9 |
+
nltk
|
| 10 |
+
numpy
|
| 11 |
+
Polygon3
|
| 12 |
+
tqdm
|
| 13 |
+
zss
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/Ocrbench_v2/spotting_eval/script.py
ADDED
|
@@ -0,0 +1,451 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa
|
| 2 |
+
#!/usr/bin/env python
|
| 3 |
+
# encoding=utf8
|
| 4 |
+
#File: E2E_iou_1_1.py
|
| 5 |
+
#Version: 1.1
|
| 6 |
+
#Version info: changes for Python 3
|
| 7 |
+
#Date: 2019-12-29
|
| 8 |
+
#Description: Evaluation script that computes End to End Recognition. For Text Localization it's used Intersection over Union criteria.
|
| 9 |
+
#Average Precision is also calcuted when 'CONFIDENCES' parameter is True
|
| 10 |
+
#There are 2 modes to determine if a detection is correct or not:
|
| 11 |
+
#with Word Spotting: The detected word must coincide (ingnoring case) to a filtered Ground Truth containing only dictionary words (see include_in_dictionary and include_in_dictionary_transcription functions)
|
| 12 |
+
#without Word Spotting: words must be equal excluding a set of special characters
|
| 13 |
+
|
| 14 |
+
from collections import namedtuple
|
| 15 |
+
import vlmeval.dataset.utils.Ocrbench_v2.spotting_eval.rrc_evaluation_funcs_1_1 as rrc_evaluation_funcs
|
| 16 |
+
import importlib
|
| 17 |
+
|
| 18 |
+
def evaluation_imports():
|
| 19 |
+
"""
|
| 20 |
+
evaluation_imports: Dictionary ( key = module name , value = alias ) with python modules used in the evaluation.
|
| 21 |
+
"""
|
| 22 |
+
return {
|
| 23 |
+
'Polygon':'plg',
|
| 24 |
+
'numpy':'np'
|
| 25 |
+
}
|
| 26 |
+
|
| 27 |
+
def default_evaluation_params():
|
| 28 |
+
"""
|
| 29 |
+
default_evaluation_params: Default parameters to use for the validation and evaluation.
|
| 30 |
+
"""
|
| 31 |
+
return {
|
| 32 |
+
'IOU_CONSTRAINT' :0.5,
|
| 33 |
+
'AREA_PRECISION_CONSTRAINT' :0.5,
|
| 34 |
+
'WORD_SPOTTING' :False,
|
| 35 |
+
'MIN_LENGTH_CARE_WORD' :3,
|
| 36 |
+
'GT_SAMPLE_NAME_2_ID':'gt_img_([0-9]+).txt',
|
| 37 |
+
'DET_SAMPLE_NAME_2_ID':'res_img_([0-9]+).txt',
|
| 38 |
+
'LTRB':False, #LTRB:2points(left,top,right,bottom) or 4 points(x1,y1,x2,y2,x3,y3,x4,y4)
|
| 39 |
+
'CRLF':False, # Lines are delimited by Windows CRLF format
|
| 40 |
+
'CONFIDENCES':False, #Detections must include confidence value. AP will be calculated,
|
| 41 |
+
'SPECIAL_CHARACTERS':'!?.:,*"()·[]/\'',
|
| 42 |
+
'ONLY_REMOVE_FIRST_LAST_CHARACTER' : True
|
| 43 |
+
}
|
| 44 |
+
|
| 45 |
+
def validate_data(gtFilePath, submFilePath, evaluationParams):
    """
    Method validate_data: validates that all files in the results folder are correct (have the correct name contents).
                            Validates also that there are no missing files in the folder.
                            If some error detected, the method raises the error
    """
    gt = rrc_evaluation_funcs.load_zip_file(gtFilePath, evaluationParams['GT_SAMPLE_NAME_2_ID'])
    subm = rrc_evaluation_funcs.load_zip_file(submFilePath, evaluationParams['DET_SAMPLE_NAME_2_ID'], True)

    # Every ground-truth file must parse under the configured line format.
    for sample_id, contents in gt.items():
        rrc_evaluation_funcs.validate_lines_in_file(
            sample_id, contents, evaluationParams['CRLF'], evaluationParams['LTRB'], True)

    # Every submitted file must match an existing GT sample and parse correctly.
    for sample_id, contents in subm.items():
        if sample_id not in gt:
            raise Exception("The sample %s not present in GT" % sample_id)

        rrc_evaluation_funcs.validate_lines_in_file(
            sample_id, contents, evaluationParams['CRLF'], evaluationParams['LTRB'],
            True, evaluationParams['CONFIDENCES'])
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def evaluate_method(gtFilePath, submFilePath, evaluationParams):
    """
    Method evaluate_method: evaluate method and returns the results
        Results. Dictionary with the following values:
        - method (required)  Global method metrics. Ex: { 'Precision':0.8,'Recall':0.9 }
        - samples (optional) Per sample metrics. Ex: {'sample1' : { 'Precision':0.8,'Recall':0.9 } , 'sample2' : { 'Precision':0.8,'Recall':0.9 }
    """
    # Late-bind the evaluation dependencies (Polygon as `plg`, numpy as `np`)
    # into module globals so the nested helpers below can reference them.
    for module,alias in evaluation_imports().items():
        globals()[alias] = importlib.import_module(module)

    def polygon_from_points(points,correctOffset=False):
        """
        Returns a Polygon object to use with the Polygon2 class from a list of 8 points: x1,y1,x2,y2,x3,y3,x4,y4
        """

        if correctOffset: #this will substract 1 from the coordinates that correspond to the xmax and ymax
            points[2] -= 1
            points[4] -= 1
            points[5] -= 1
            points[7] -= 1

        # Pack xs into columns 0-3 and ys into columns 4-7, then reshape to a
        # (4, 2) point matrix expected by the Polygon constructor.
        resBoxes=np.empty([1,8],dtype='int32')
        resBoxes[0,0]=int(points[0])
        resBoxes[0,4]=int(points[1])
        resBoxes[0,1]=int(points[2])
        resBoxes[0,5]=int(points[3])
        resBoxes[0,2]=int(points[4])
        resBoxes[0,6]=int(points[5])
        resBoxes[0,3]=int(points[6])
        resBoxes[0,7]=int(points[7])
        pointMat = resBoxes[0].reshape([2,4]).T
        return plg.Polygon( pointMat)

    def rectangle_to_polygon(rect):
        # Convert an axis-aligned Rectangle namedtuple into a 4-corner Polygon.
        resBoxes=np.empty([1,8],dtype='int32')
        resBoxes[0,0]=int(rect.xmin)
        resBoxes[0,4]=int(rect.ymax)
        resBoxes[0,1]=int(rect.xmin)
        resBoxes[0,5]=int(rect.ymin)
        resBoxes[0,2]=int(rect.xmax)
        resBoxes[0,6]=int(rect.ymin)
        resBoxes[0,3]=int(rect.xmax)
        resBoxes[0,7]=int(rect.ymax)

        pointMat = resBoxes[0].reshape([2,4]).T

        return plg.Polygon( pointMat)

    def rectangle_to_points(rect):
        # Flatten a Rectangle into the 8-value corner list used for logging/output.
        points = [int(rect.xmin), int(rect.ymax), int(rect.xmax), int(rect.ymax), int(rect.xmax), int(rect.ymin), int(rect.xmin), int(rect.ymin)]
        return points

    def get_union(pD,pG):
        # Union area = areaA + areaB - intersection (inclusion-exclusion).
        areaA = pD.area();
        areaB = pG.area();
        return areaA + areaB - get_intersection(pD, pG);

    def get_intersection_over_union(pD,pG):
        # Returns 0 on any failure (e.g. zero-area union / degenerate polygons).
        try:
            return get_intersection(pD, pG) / get_union(pD, pG);
        except:
            return 0

    def get_intersection(pD,pG):
        # `&` computes the polygon intersection; empty result means no overlap.
        pInt = pD & pG
        if len(pInt) == 0:
            return 0
        return pInt.area()

    def compute_ap(confList, matchList,numGtCare):
        # Average precision over detections sorted by descending confidence.
        correct = 0
        AP = 0
        if len(confList)>0:
            confList = np.array(confList)
            matchList = np.array(matchList)
            sorted_ind = np.argsort(-confList)
            confList = confList[sorted_ind]
            matchList = matchList[sorted_ind]
            for n in range(len(confList)):
                match = matchList[n]
                if match:
                    correct += 1
                    AP += float(correct)/(n + 1)

            if numGtCare>0:
                AP /= numGtCare

        return AP

    def transcription_match(transGt,transDet,specialCharacters='!?.:,*"()·[]/\'',onlyRemoveFirstLastCharacterGT=True):
        # Compare GT vs detected transcription, tolerating special characters
        # at the edges of the GT (or of both, depending on the flag).

        if onlyRemoveFirstLastCharacterGT:
            #special characters in GT are allowed only at initial or final position
            if (transGt==transDet):
                return True

            if specialCharacters.find(transGt[0])>-1:
                if transGt[1:]==transDet:
                    return True

            if specialCharacters.find(transGt[-1])>-1:
                if transGt[0:len(transGt)-1]==transDet:
                    return True

            if specialCharacters.find(transGt[0])>-1 and specialCharacters.find(transGt[-1])>-1:
                if transGt[1:len(transGt)-1]==transDet:
                    return True
            return False
        else:
            #Special characters are removed from the begining and the end of both Detection and GroundTruth
            while len(transGt)>0 and specialCharacters.find(transGt[0])>-1:
                transGt = transGt[1:]

            while len(transDet)>0 and specialCharacters.find(transDet[0])>-1:
                transDet = transDet[1:]

            while len(transGt)>0 and specialCharacters.find(transGt[-1])>-1 :
                transGt = transGt[0:len(transGt)-1]

            while len(transDet)>0 and specialCharacters.find(transDet[-1])>-1:
                transDet = transDet[0:len(transDet)-1]

            return transGt == transDet

    def include_in_dictionary(transcription):
        """
        Function used in Word Spotting that finds if the Ground Truth transcription meets the rules to enter into the dictionary. If not, the transcription will be cared as don't care
        """
        # NOTE: reads `evaluationParams` from the enclosing scope.
        #special case 's at final
        if transcription[len(transcription)-2:]=="'s" or transcription[len(transcription)-2:]=="'S":
            transcription = transcription[0:len(transcription)-2]

        #hypens at init or final of the word
        transcription = transcription.strip('-');

        specialCharacters = "'!?.:,*\"()·[]/";
        for character in specialCharacters:
            transcription = transcription.replace(character,' ')

        transcription = transcription.strip()

        # Words containing inner whitespace (after cleanup) are excluded.
        if len(transcription) != len(transcription.replace(" ","")) :
            return False;

        if len(transcription) < evaluationParams['MIN_LENGTH_CARE_WORD']:
            return False;

        notAllowed = "×÷·";

        # Accepted character ranges: basic Latin letters, Latin-1/Extended
        # letters, Greek letters, and the hyphen.
        range1 = [ ord(u'a'), ord(u'z') ]
        range2 = [ ord(u'A'), ord(u'Z') ]
        range3 = [ ord(u'À'), ord(u'ƿ') ]
        range4 = [ ord(u'DŽ'), ord(u'ɿ') ]
        range5 = [ ord(u'Ά'), ord(u'Ͽ') ]
        range6 = [ ord(u'-'), ord(u'-') ]

        for char in transcription :
            charCode = ord(char)
            if(notAllowed.find(char) != -1):
                return False

            valid = ( charCode>=range1[0] and charCode<=range1[1] ) or ( charCode>=range2[0] and charCode<=range2[1] ) or ( charCode>=range3[0] and charCode<=range3[1] ) or ( charCode>=range4[0] and charCode<=range4[1] ) or ( charCode>=range5[0] and charCode<=range5[1] ) or ( charCode>=range6[0] and charCode<=range6[1] )
            if valid == False:
                return False

        return True

    def include_in_dictionary_transcription(transcription):
        """
        Function applied to the Ground Truth transcriptions used in Word Spotting. It removes special characters or terminations
        """
        #special case 's at final
        if transcription[len(transcription)-2:]=="'s" or transcription[len(transcription)-2:]=="'S":
            transcription = transcription[0:len(transcription)-2]

        #hypens at init or final of the word
        transcription = transcription.strip('-');

        specialCharacters = "'!?.:,*\"()·[]/";
        for character in specialCharacters:
            transcription = transcription.replace(character,' ')

        transcription = transcription.strip()

        return transcription

    perSampleMetrics = {}

    matchedSum = 0

    Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax')

    gt = rrc_evaluation_funcs.load_zip_file(gtFilePath,evaluationParams['GT_SAMPLE_NAME_2_ID'])
    subm = rrc_evaluation_funcs.load_zip_file(submFilePath,evaluationParams['DET_SAMPLE_NAME_2_ID'],True)

    # Global counters accumulated across all samples.
    numGlobalCareGt = 0;
    numGlobalCareDet = 0;

    arrGlobalConfidences = [];
    arrGlobalMatches = [];

    for resFile in gt:

        gtFile = rrc_evaluation_funcs.decode_utf8(gt[resFile])
        if (gtFile is None) :
            raise Exception("The file %s is not UTF-8" %resFile)

        # Per-sample state.
        recall = 0
        precision = 0
        hmean = 0
        detCorrect = 0
        iouMat = np.empty([1,1])
        gtPols = []
        detPols = []
        gtTrans = []
        detTrans = []
        gtPolPoints = []
        detPolPoints = []
        gtDontCarePolsNum = [] #Array of Ground Truth Polygons' keys marked as don't Care
        detDontCarePolsNum = [] #Array of Detected Polygons' matched with a don't Care GT
        detMatchedNums = []
        pairs = []

        arrSampleConfidences = [];
        arrSampleMatch = [];
        sampleAP = 0;

        evaluationLog = ""

        # ---- Parse ground truth for this sample ----
        pointsList,_,transcriptionsList = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(gtFile,evaluationParams['CRLF'],evaluationParams['LTRB'],True,False)
        for n in range(len(pointsList)):
            points = pointsList[n]
            transcription = transcriptionsList[n]
            dontCare = transcription == "###"
            if evaluationParams['LTRB']:
                gtRect = Rectangle(*points)
                gtPol = rectangle_to_polygon(gtRect)
            else:
                gtPol = polygon_from_points(points)
            gtPols.append(gtPol)
            gtPolPoints.append(points)

            #On word spotting we will filter some transcriptions with special characters
            if evaluationParams['WORD_SPOTTING'] :
                if dontCare == False :
                    if include_in_dictionary(transcription) == False :
                        dontCare = True
                    else:
                        transcription = include_in_dictionary_transcription(transcription)

            gtTrans.append(transcription)
            if dontCare:
                gtDontCarePolsNum.append( len(gtPols)-1 )

        evaluationLog += "GT polygons: " + str(len(gtPols)) + (" (" + str(len(gtDontCarePolsNum)) + " don't care)\n" if len(gtDontCarePolsNum)>0 else "\n")

        if resFile in subm:

            detFile = rrc_evaluation_funcs.decode_utf8(subm[resFile])

            # ---- Parse the submission for this sample ----
            pointsList,confidencesList,transcriptionsList = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(detFile,evaluationParams['CRLF'],evaluationParams['LTRB'],True,evaluationParams['CONFIDENCES'])

            for n in range(len(pointsList)):
                points = pointsList[n]
                transcription = transcriptionsList[n]

                if evaluationParams['LTRB']:
                    detRect = Rectangle(*points)
                    detPol = rectangle_to_polygon(detRect)
                else:
                    detPol = polygon_from_points(points)
                detPols.append(detPol)
                detPolPoints.append(points)
                detTrans.append(transcription)

                # A detection that mostly covers a don't-care GT is itself don't-care.
                if len(gtDontCarePolsNum)>0 :
                    for dontCarePol in gtDontCarePolsNum:
                        dontCarePol = gtPols[dontCarePol]
                        intersected_area = get_intersection(dontCarePol,detPol)
                        pdDimensions = detPol.area()
                        precision = 0 if pdDimensions == 0 else intersected_area / pdDimensions
                        if (precision > evaluationParams['AREA_PRECISION_CONSTRAINT'] ):
                            detDontCarePolsNum.append( len(detPols)-1 )
                            break

            evaluationLog += "DET polygons: " + str(len(detPols)) + (" (" + str(len(detDontCarePolsNum)) + " don't care)\n" if len(detDontCarePolsNum)>0 else "\n")

            if len(gtPols)>0 and len(detPols)>0:
                #Calculate IoU and precision matrixs
                outputShape=[len(gtPols),len(detPols)]
                iouMat = np.empty(outputShape)
                gtRectMat = np.zeros(len(gtPols),np.int8)
                detRectMat = np.zeros(len(detPols),np.int8)
                for gtNum in range(len(gtPols)):
                    for detNum in range(len(detPols)):
                        pG = gtPols[gtNum]
                        pD = detPols[detNum]
                        iouMat[gtNum,detNum] = get_intersection_over_union(pD,pG)

                # Greedy one-to-one matching over IoU; a match counts as correct
                # only when the transcription also matches.
                for gtNum in range(len(gtPols)):
                    for detNum in range(len(detPols)):
                        if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and gtNum not in gtDontCarePolsNum and detNum not in detDontCarePolsNum :
                            if iouMat[gtNum,detNum]>evaluationParams['IOU_CONSTRAINT']:
                                gtRectMat[gtNum] = 1
                                detRectMat[detNum] = 1
                                #detection matched only if transcription is equal
                                if evaluationParams['WORD_SPOTTING']:
                                    correct = gtTrans[gtNum].upper() == detTrans[detNum].upper()
                                else:
                                    correct = transcription_match(gtTrans[gtNum].upper(),detTrans[detNum].upper(),evaluationParams['SPECIAL_CHARACTERS'],evaluationParams['ONLY_REMOVE_FIRST_LAST_CHARACTER'])==True
                                detCorrect += (1 if correct else 0)
                                if correct:
                                    detMatchedNums.append(detNum)
                                pairs.append({'gt':gtNum,'det':detNum,'correct':correct})
                                evaluationLog += "Match GT #" + str(gtNum) + " with Det #" + str(detNum) + " trans. correct: " + str(correct) + "\n"

            # Collect confidence/match pairs for AP computation.
            if evaluationParams['CONFIDENCES']:
                for detNum in range(len(detPols)):
                    if detNum not in detDontCarePolsNum :
                        #we exclude the don't care detections
                        match = detNum in detMatchedNums

                        arrSampleConfidences.append(confidencesList[detNum])
                        arrSampleMatch.append(match)

                        arrGlobalConfidences.append(confidencesList[detNum]);
                        arrGlobalMatches.append(match);

        # ---- Per-sample metrics ----
        numGtCare = (len(gtPols) - len(gtDontCarePolsNum))
        numDetCare = (len(detPols) - len(detDontCarePolsNum))
        if numGtCare == 0:
            # No cared GT: recall is trivially 1; precision depends on whether
            # any cared detections were produced.
            recall = float(1)
            precision = float(0) if numDetCare >0 else float(1)
            sampleAP = precision
        else:
            recall = float(detCorrect) / numGtCare
            precision = 0 if numDetCare==0 else float(detCorrect) / numDetCare
            if evaluationParams['CONFIDENCES']:
                sampleAP = compute_ap(arrSampleConfidences, arrSampleMatch, numGtCare )

        hmean = 0 if (precision + recall)==0 else 2.0 * precision * recall / (precision + recall)

        matchedSum += detCorrect
        numGlobalCareGt += numGtCare
        numGlobalCareDet += numDetCare

        perSampleMetrics[resFile] = {
                                        'precision':precision,
                                        'recall':recall,
                                        'hmean':hmean,
                                        'pairs':pairs,
                                        'AP':sampleAP,
                                        'iouMat':[] if len(detPols)>100 else iouMat.tolist(),
                                        'gtPolPoints':gtPolPoints,
                                        'detPolPoints':detPolPoints,
                                        'gtTrans':gtTrans,
                                        'detTrans':detTrans,
                                        'gtDontCare':gtDontCarePolsNum,
                                        'detDontCare':detDontCarePolsNum,
                                        'evaluationParams': evaluationParams,
                                        'evaluationLog': evaluationLog
                                    }

    # Compute AP
    AP = 0
    if evaluationParams['CONFIDENCES']:
        AP = compute_ap(arrGlobalConfidences, arrGlobalMatches, numGlobalCareGt)

    # ---- Global metrics across all samples ----
    methodRecall = 0 if numGlobalCareGt == 0 else float(matchedSum)/numGlobalCareGt
    methodPrecision = 0 if numGlobalCareDet == 0 else float(matchedSum)/numGlobalCareDet
    methodHmean = 0 if methodRecall + methodPrecision==0 else 2* methodRecall * methodPrecision / (methodRecall + methodPrecision)

    methodMetrics = {'precision':methodPrecision, 'recall':methodRecall,'hmean': methodHmean, 'AP': AP }

    resDict = {'calculated':True,'Message':'','method': methodMetrics,'per_sample': perSampleMetrics}


    return resDict
|
| 446 |
+
|
| 447 |
+
|
| 448 |
+
|
| 449 |
+
if __name__=='__main__':

    # Command-line entry point: delegate argument parsing, validation and
    # result reporting to the shared RRC evaluation driver.
    rrc_evaluation_funcs.main_evaluation(None,default_evaluation_params,validate_data,evaluate_method)
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/Ocrbench_v2/spotting_metric.py
ADDED
|
@@ -0,0 +1,185 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa
|
| 2 |
+
import re
|
| 3 |
+
import os
|
| 4 |
+
import ast
|
| 5 |
+
import ipdb
|
| 6 |
+
import shutil
|
| 7 |
+
import zipfile
|
| 8 |
+
import subprocess
|
| 9 |
+
import vlmeval.dataset.utils.Ocrbench_v2.spotting_eval.rrc_evaluation_funcs_1_1 as rrc_evaluation_funcs
|
| 10 |
+
from vlmeval.dataset.utils.Ocrbench_v2.spotting_eval.script import default_evaluation_params,validate_data,evaluate_method
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def extract_bounding_boxes_robust(predict_str):
    """
    Extract coordinates and text content from the given prediction string,
    handling potential format issues.

    Tries `ast.literal_eval` first; if the string is not a Python literal,
    falls back to a regex that pulls comma-separated 5-tuples out of any
    bracketed/parenthesized groups. Boxes with coordinates outside [0, 1000]
    are dropped, and duplicates are kept only once.

    Args:
        predict_str (str): Model prediction output as a string.

    Returns:
        list: Extracted data in the format [[x1, y1, x2, y2, text_content], ...].
              Returns None if no valid data is extracted.
    """
    results = []
    seen = set()

    def _add_box(x1_raw, y1_raw, x2_raw, y2_raw, text_raw):
        """Normalize one candidate box; append to results if valid and unseen."""
        text_content = str(text_raw).replace("\n", "").strip().strip('"').strip("'")
        try:
            coords = [int(str(v).strip()) for v in (x1_raw, y1_raw, x2_raw, y2_raw)]
        except ValueError:
            return
        # Coordinates are expected to be normalized to a 0-1000 grid.
        if not all(0 <= c <= 1000 for c in coords):
            return
        key = (coords[0], coords[1], coords[2], coords[3], text_content)
        if key in seen:
            return
        seen.add(key)
        results.append([coords[0], coords[1], coords[2], coords[3], text_content])

    # try parsing with ast.literal_eval
    try:
        data = ast.literal_eval(predict_str)
    except Exception:
        data = None

    if data is not None:
        if isinstance(data, (list, tuple)):
            for item in data:
                if isinstance(item, (list, tuple)) and len(item) >= 5:
                    _add_box(item[0], item[1], item[2], item[3], item[4])
    else:
        # try parsing with regular expression: grab the interior of each
        # bracketed group, then split into 4 coordinates + trailing text.
        items = re.findall(r'[\[\(]\s*([^\[\]\(\)]*?)\s*[\]\)]', predict_str)

        if not items:
            return None

        for item in items:
            parts = item.split(',', 4)
            if len(parts) < 5:
                continue
            _add_box(parts[0], parts[1], parts[2], parts[3], parts[4])

    if not results:
        return None

    return results
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def zip_folder(source_folder, destination_zip):
    """Zip every file under *source_folder* into *destination_zip*.

    Entries are stored with paths relative to the folder root, so unzipping
    reproduces the folder's contents without the leading directory.
    """
    src_root = os.path.abspath(source_folder)
    dst_path = os.path.abspath(destination_zip)

    with zipfile.ZipFile(dst_path, 'w', zipfile.ZIP_DEFLATED) as archive:
        for dirpath, _dirs, filenames in os.walk(src_root):
            for name in filenames:
                full_path = os.path.join(dirpath, name)
                archive.write(full_path, os.path.relpath(full_path, src_root))
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def spotting_evaluation(prediction_list, img_metas):
    """
    Score one image's text-spotting predictions against ground truth using
    the ICDAR RRC end-to-end evaluation protocol.

    Args:
        prediction_list (list): predicted boxes as [x1, y1, x2, y2, text].
        img_metas (dict): must contain "bbox" (flat polygon coordinate lists)
            and "content" (transcriptions), aligned by index.

    Returns:
        float: hmean reported by the RRC evaluator; 0 when either side has
        no usable entries.
    """
    score = 0

    # Scratch locations for the zip-based RRC evaluator.
    # NOTE(review): these paths start with ".vlmeval" (a hidden directory under
    # the CWD) — confirm this is intentional and not a typo for "./vlmeval".
    submit_path = ".vlmeval/dataset/utils/Ocrbench_v2/spotting_eval/submit"
    gt_path = ".vlmeval/dataset/utils/Ocrbench_v2/spotting_eval/gt"
    submit_zip_path = ".vlmeval/dataset/utils/Ocrbench_v2/spotting_eval/submit.zip"
    gt_zip_path = ".vlmeval/dataset/utils/Ocrbench_v2/spotting_eval/gt.zip"
    # Start from a clean slate: drop stale zips, recreate the txt folders.
    for file_path in [submit_path, gt_path, submit_zip_path, gt_zip_path]:
        if "zip" in file_path:
            if os.path.exists(file_path):
                os.remove(file_path)
        else:
            if os.path.exists(file_path):
                shutil.rmtree(file_path)
            os.makedirs(file_path)

    # Serialize predictions as 4-corner polygons (clockwise from top-left).
    res_submit_list = []
    for item in prediction_list:
        if len(item) != 5:
            # Malformed prediction entry: skip instead of crashing.
            # (Replaced a leftover ipdb.set_trace() debugging breakpoint;
            # the subsequent unpack would have raised anyway.)
            continue
        x1, y1, x2, y2, rec = item
        if x1 >= x2 or y1 >= y2:
            continue

        res_submit_list.append(",".join([str(x1),str(y1),str(x2),str(y1),str(x2),str(y2),str(x1),str(y2),rec]))

    # Serialize ground truth: axis-aligned bounding box of each GT polygon.
    res_gt_list = []
    for bbox, rec in zip(img_metas["bbox"], img_metas["content"]):
        x_coords = bbox[0::2]
        y_coords = bbox[1::2]

        x1, y1 = min(x_coords), min(y_coords)
        x2, y2 = max(x_coords), max(y_coords)

        res_gt_list.append(",".join([str(x1),str(y1),str(x2),str(y1),str(x2),str(y2),str(x1),str(y2),rec]))

    if len(res_submit_list) == 0 or len(res_gt_list) == 0:
        return 0

    with open(os.path.join(submit_path,"res_img_0.txt"), "w") as f:
        for item in res_submit_list[:-1]:
            f.write(item + "\n")
        f.write(res_submit_list[-1])

    with open(os.path.join(gt_path,"gt_img_0.txt"), "w") as f:
        for item in res_gt_list[:-1]:
            f.write(item + "\n")
        f.write(res_gt_list[-1])

    zip_folder(submit_path, submit_zip_path)
    zip_folder(gt_path, gt_zip_path)

    # Arguments for the RRC driver: g=GT zip, s=submission zip, o=output dir,
    # p=JSON-encoded parameter overrides.
    command = {
        'g': gt_zip_path,
        's': submit_zip_path,
        'o': './',
        'p': '{"IOU_CONSTRAINT":0.5}'
    }

    # run rrc_evaluation_funcs
    result = rrc_evaluation_funcs.main_evaluation(command,default_evaluation_params,validate_data,evaluate_method)
    score = result["method"]["hmean"]
    return score
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/Ocrbench_v2/vqa_metric.py
ADDED
|
@@ -0,0 +1,282 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
import os
|
| 3 |
+
import json
|
| 4 |
+
import ipdb
|
| 5 |
+
import math
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def levenshtein_distance(s1, s2):
    """Return the edit distance between *s1* and *s2*.

    Classic dynamic-programming formulation keeping only one row at a time,
    so memory is O(min(len(s1), len(s2))).
    """
    # Keep the shorter string along the row dimension to minimize memory.
    if len(s1) > len(s2):
        s1, s2 = s2, s1

    previous_row = list(range(len(s1) + 1))
    for row_idx, ch2 in enumerate(s2, start=1):
        current_row = [row_idx]
        for col_idx, ch1 in enumerate(s1):
            if ch1 == ch2:
                # Characters match: no additional edit needed.
                current_row.append(previous_row[col_idx])
            else:
                # 1 + min(substitution, deletion, insertion).
                current_row.append(1 + min(previous_row[col_idx],
                                           previous_row[col_idx + 1],
                                           current_row[-1]))
        previous_row = current_row
    return previous_row[-1]
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def vqa_evaluation(predict, answers):
    """
    Case-insensitive VQA scoring.

    Short answers (< 5 words) score 1 on substring containment; longer
    answers use ANLS (1 - normalized edit distance) with a 0.5 cutoff.
    When *answers* is a list, the best per-answer score is returned.

    Fixes over the previous version: the bare `except: ipdb.set_trace()`
    debug leftover is gone, the caller's `answers` list is no longer
    mutated, and numeric scalars are accepted in both branches.

    Args:
        predict: model output (str or numeric).
        answers: gold answer(s) — a str/number or a list of them.

    Returns:
        float: score in [0, 1].
    """

    def _score_one(pred, ans):
        """Score *pred* against a single gold answer."""
        if isinstance(ans, (int, float)):
            ans = str(ans)
        ans = ans.lower().strip().replace("\n", " ")
        if isinstance(pred, (int, float)):
            pred = str(pred)
        pred = pred.lower().strip().replace("\n", " ")

        if len(ans.split()) < 5:
            # Short answer: exact containment check.
            return 1 if ans in pred else 0

        # Long answer: ANLS with the standard 0.5 threshold.
        dist = levenshtein_distance(pred, ans)
        length = max(len(pred), len(ans))
        anls = 0.0 if length == 0 else 1 - float(dist) / float(length)
        return anls if anls >= 0.5 else 0

    if isinstance(answers, list):
        # Best score over all acceptable answers (0 for an empty list).
        return max((_score_one(predict, ans) for ans in answers), default=0)
    return _score_one(predict, answers)
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def cn_vqa_evaluation(predict, answers):
    """
    Chinese VQA scoring: like `vqa_evaluation`, but all whitespace is removed
    before comparison and "short answer" means fewer than 4 comma-separated
    segments.

    Fixes over the previous version: the non-list branch referenced the
    undefined name `answer` (a NameError on every scalar input) — it now
    uses the actual argument; the bare `except: ipdb.set_trace()` debug
    leftover is gone; the caller's `answers` list is no longer mutated.

    Args:
        predict: model output (str or numeric).
        answers: gold answer(s) — a str/number or a list of them.

    Returns:
        float: score in [0, 1].
    """

    def _score_one(pred, ans):
        """Score *pred* against a single gold answer."""
        if isinstance(ans, (int, float)):
            ans = str(ans)
        # Normalize: lowercase, strip, and drop all spaces/newlines
        # (whitespace is not meaningful in Chinese text).
        ans = ans.lower().strip().replace("\n", " ").replace(" ", "")
        if isinstance(pred, (int, float)):
            pred = str(pred)
        pred = pred.lower().strip().replace("\n", " ").replace(" ", "")

        if len(ans.split(",")) < 4:
            # Short answer: exact containment check.
            return 1 if ans in pred else 0

        # Long answer: ANLS with the standard 0.5 threshold.
        dist = levenshtein_distance(pred, ans)
        length = max(len(pred), len(ans))
        anls = 0.0 if length == 0 else 1 - float(dist) / float(length)
        return anls if anls >= 0.5 else 0

    if isinstance(answers, list):
        # Best score over all acceptable answers (0 for an empty list).
        return max((_score_one(predict, ans) for ans in answers), default=0)
    return _score_one(predict, answers)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def vqa_evaluation_case_sensitive(predict, answers):
    """
    Case-SENSITIVE VQA scoring: identical to `vqa_evaluation` except the
    strings are not lowercased before comparison.

    Fixes over the previous version: the bare `except: ipdb.set_trace()`
    debug leftover is gone and the caller's `answers` list is no longer
    mutated.

    Args:
        predict (str): model output.
        answers: gold answer(s) — a str/number or a list of them.

    Returns:
        float: score in [0, 1].
    """

    def _score_one(pred, ans):
        """Score *pred* against a single gold answer, preserving case."""
        if isinstance(ans, (int, float)):
            ans = str(ans)
        ans = ans.strip().replace("\n", " ")
        pred = pred.strip().replace("\n", " ")

        if len(ans.split()) < 5:
            # Short answer: exact (case-sensitive) containment check.
            return 1 if ans in pred else 0

        # Long answer: ANLS with the standard 0.5 threshold.
        dist = levenshtein_distance(pred, ans)
        length = max(len(pred), len(ans))
        anls = 0.0 if length == 0 else 1 - float(dist) / float(length)
        return anls if anls >= 0.5 else 0

    if isinstance(answers, list):
        # Best score over all acceptable answers (0 for an empty list).
        return max((_score_one(predict, ans) for ans in answers), default=0)
    return _score_one(predict, answers)
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def extract_first_number(string):
    """Return the first run of digits in *string* as an int, or None if absent."""
    found = re.search(r'\d+', string)
    return int(found.group()) if found else None
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
def counting_evaluation(predict, answers, eval_method):
    """Score a counting question.

    ``eval_method == "exact match"``: substring containment of the answer.
    ``eval_method == "regression"``: soft credit ``1 - |pred - gt| / gt``,
    counted only when > 0.5 and pred lies in (0, 2 * gt).

    With a list of answers, the best candidate score is returned.

    Fix: the non-list branch referenced the undefined name ``answer``
    (NameError at runtime) in both the exact-match test and the int
    conversion; it now uses the normalized ``answers`` value.

    :param predict: model prediction (str, or a number; NaN scores 0)
    :param answers: ground truth, a str/number or a list of candidates
    :param eval_method: "exact match" or "regression"
    :return: score in [0, 1]
    """
    score = 0

    if isinstance(predict, str):
        predict_processed = predict.lower().strip().replace("\n", " ")
    elif math.isnan(predict):
        # Missing prediction (NaN) scores zero outright.
        return 0
    else:
        predict_processed = int(predict)
    if isinstance(answers, list):
        temp_score = 0
        for j in range(len(answers)):
            if isinstance(answers[j], (int, float)):
                answers[j] = str(answers[j])
            answer = answers[j].lower().strip().replace("\n", " ")
            if eval_method == "exact match":
                # NOTE(review): matches against the raw (case-preserving)
                # `predict`, not `predict_processed` — kept as-is; confirm
                # whether lower-cased matching was intended here.
                if answer in predict:
                    score = 1
                else:
                    score = 0
            elif eval_method == "regression":
                # NOTE(review): assumes `predict_processed` is a str here
                # (regex search); a numeric predict would raise — original
                # behavior preserved.
                predict_number = extract_first_number(predict_processed)
                if predict_number:
                    answer = int(answer)
                    if predict_number <= 0 or predict_number >= 2 * answer:
                        score = 0
                    else:
                        iou = 1 - abs(predict_number - answer) / answer
                        if iou > 0.5:
                            score = iou
                        else:
                            score = 0
                else:
                    score = 0
            # Keep the best score across candidates.
            if score > temp_score:
                temp_score = score
        score = temp_score

    else:
        answers = answers.lower().strip().replace("\n", " ")
        predict = predict.lower().strip().replace("\n", " ")
        if eval_method == "exact match":
            # BUG FIX: original tested the undefined name `answer` here.
            if answers in predict:
                score = 1
            else:
                score = 0
        elif eval_method == "regression":
            predict = extract_first_number(predict)
            if predict:
                # BUG FIX: original converted the undefined name `answer`.
                answer = int(answers)
                if predict <= 0 or predict >= 2 * answer:
                    score = 0
                else:
                    iou = 1 - abs(predict - answer) / answer

                    if iou > 0.5:
                        score = iou
                    else:
                        score = 0
            else:
                score = 0
    return score
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
def math_expression_evaluation(predict, answers):
    """Whitespace-insensitive containment check of a math expression.

    All whitespace/newlines are stripped from both sides; score is 1 when the
    (or any listed) ground-truth expression appears inside the prediction.
    """
    score = 0
    if isinstance(answers, list):
        for j in range(len(answers)):
            candidate = answers[j].strip().replace("\n", " ").replace(" ", "")
            predict = predict.strip().replace("\n", " ").replace(" ", "")
            if candidate in predict:
                score = 1
    else:
        normalized = answers.strip().replace("\n", " ").replace(" ", "")
        predict = predict.strip().replace("\n", " ").replace(" ", "")
        if normalized in predict:
            score = 1
    return score
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
def remove_text_tags(latex_str):
    r"""Strip LaTeX ``\text{...}`` wrappers while keeping their contents.

    :param latex_str: string containing LaTeX markup
    :return: the string with every ``\text{...}`` replaced by its inner text
    """
    return re.sub(r'\\text\{([^{}]*)\}', r'\1', latex_str)
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
def cn_math_expression_evaluation(predict, answers):
    r"""Chinese math-expression scoring.

    Strips ``\text{...}`` wrappers from both sides, removes all whitespace,
    then checks containment of the single ground-truth expression in
    ``predict``.

    Fix: removed the unreachable non-list branch — after the assert,
    ``answers`` is always re-wrapped into a list, so the old ``else`` (which
    would have crashed calling ``.strip()`` on a list) could never execute.

    :param predict: model prediction (str)
    :param answers: list with exactly one ground-truth expression
    :return: 1 on containment match, else 0
    """
    score = 0

    # Exactly one ground-truth expression is expected for this task.
    assert len(answers) == 1
    answers = [remove_text_tags(answers[0])]
    predict = remove_text_tags(predict)

    for j in range(len(answers)):
        answer = answers[j].strip().replace("\n", " ").replace(" ", "")
        predict = predict.strip().replace("\n", " ").replace(" ", "")
        if answer in predict:
            score = 1
    return score
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
if __name__ == "__main__":
    # Quick manual smoke test: a prediction that partially matches several
    # candidate answers; prints the resulting VQA score.
    test_predict = "apple pie and banana"
    test_answers = ["apple", "banana pie", "apple pie and orange"]

    vqa_score = vqa_evaluation(test_predict, test_answers)
    print(f"VQA evaluation score for predict '{test_predict}' and answers {test_answers}: {vqa_score}")
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/cgbench.py
ADDED
|
@@ -0,0 +1,620 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ...smp import *
|
| 2 |
+
from .multiple_choice import extract_answer_from_item
|
| 3 |
+
import pandas as pd
|
| 4 |
+
import numpy as np
|
| 5 |
+
import re
|
| 6 |
+
import zipfile
|
| 7 |
+
|
| 8 |
+
# Sentinel returned when the judge/evaluator API call fails.
FAIL_MSG = "Failed to obtain answer via API."

# Filename template for cached sampled frames: frame-<index>-of-<total>.jpg
frame_tmpl = "frame-{}-of-{}.jpg"

# Step-1 judge prompt: text-only comparison of prediction vs. ground truth;
# outputs 0 (wrong), 1 (right), or 2 (needs visual evidence → go to step 2).
sys_prompt_open_eval_step_1 = (
    "You will be provided with a question, a model's prediction, and the ground "
    "truth answer for this question.\n"
    "Your task is to judge whether the model's prediction is correct based on the "
    "meaning of the two texts.\n"
    "In most cases, this can be done by determining if the meaning of the model's "
    "prediction is consistent with, or contains, the ground truth answer. However, "
    "in some cases where the two texts differ, it may represent different "
    "descriptions of the same visual scene, in which case visual information is "
    "needed for further judgment.\n"
    "Therefore, I hope you:\n"
    "- Output 0, if the model's prediction and the ground truth answer are neither "
    "consistent nor related by inclusion, with fundamentally different meanings.\n"
    "- Output 1, if the meaning of the model's prediction and the ground truth "
    "answer is consistent, or if the model's prediction meaningfully contains the "
    "ground truth answer.\n"
    "- Output 2, if the model's prediction and ground truth are not consistent or "
    "inclusive, but may be different descriptions of the same visual scene, "
    "requiring visual information for further judgment.\n"
    "Only output the answer in the following format:\n\n"
    '```json\n{"result": choice}\n```\n\n'
    "The choice is either 0, 1, or 2 as specified above."
)

# Step-2 judge prompt: vision-assisted verdict using clue-interval frames;
# outputs 0 (wrong) or 1 (right).
sys_prompt_open_eval_step_2 = (
    "You will be provided with a question, a model's prediction, and the sampling "
    "frames of the clue intervals related to this question.\n"
    "Your task is to determine whether the model has answered the question "
    "correctly based on the visual information provided.\n"
    "Therefore, I hope you:\n"
    "- Output 0, if the model's prediction does not correctly answer the question.\n"
    "- Output 1, if the model's prediction correctly answers the question.\n"
    "Only output the answer in the following format without output extra "
    "explanation:\n\n"
    '```json\n{"result": choice}\n```\n\n'
    "The choice is either 0 or 1 as specified above."
)

# NOTE: a duplicate `FAIL_MSG = "Failed to obtain answer via API."` definition
# that originally appeared here has been removed (defined once above).

# Video-duration buckets in minutes.
DURATIONS = ["0 ~ 10", "10 ~ 20", "20 ~ 30", "30 ~ 40", "40 ~ 50", "50 ~ 60", "60+"]

# Dataset domain labels. These must match the dataset annotations exactly —
# including the "Electonic" spelling — so do not "fix" them here.
DOMAINS = [
    "Life Record",
    "Music & TV show",
    "Instruction & Knowledge",
    "Driving",
    "Embodied Expert",
    "Humor/funny",
    "Electonic/Social Gaming",
    "Security & Health",
    "Sports & Exercise",
    "Special Scenes",
    "Art & Culture",
    "GUI",
    "News",
    "Animal & Pet",
]

# Fine-grained question sub-category labels (must match dataset annotations).
SUB_CATEGORIES = [
    "Time Cognition",
    "Hallucination",
    "Entity Perception",
    "2D Spatial Perception",
    "Time Perception",
    "Scene Perception",
    "Text Perception",
    "Event Cognition",
    "Entity Cognition",
    "Text Cognition",
    "Event Perception",
    "Scene Cognition",
]
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def get_dimention_rating_open_ended(data_path):
    """Aggregate open-ended evaluation scores.

    Loads per-question results and returns mean scores overall and broken
    down by duration bucket, domain, and sub-category.

    :param data_path: path to a results table with columns ``score``,
        ``duration`` (seconds), ``domain``, ``sub_category``
    :return: nested dict of rounded mean scores
    """
    # Load per-question results.
    df = load(data_path)

    # Drop rows whose score could not be determined (-1 sentinel).
    df = df[df["score"] != -1]

    # Convert seconds to minutes and bucket into the DURATIONS ranges.
    df["duration_minutes"] = df["duration"] / 60
    df["duration_range"] = pd.cut(
        df["duration_minutes"], bins=[-np.inf, 10, 20, 30, 40, 50, 60, np.inf], labels=DURATIONS
    )

    # Result skeleton: one overall figure plus per-dimension breakdowns.
    result = {
        "overall": 0,
        "duration": {k: 0 for k in DURATIONS},
        "domain": {k: 0 for k in DOMAINS},
        "sub_category": {k: 0 for k in SUB_CATEGORIES},
    }

    # Overall
    result["overall"] = round(df["score"].mean(), 4)

    # Duration
    for dur in DURATIONS:
        dur_scores = df[df["duration_range"] == dur]["score"]
        result["duration"][dur] = round(dur_scores.mean(), 4) if not dur_scores.empty else 0

    # Domain
    for domain in DOMAINS:
        domain_scores = df[df["domain"] == domain]["score"]
        result["domain"][domain] = round(domain_scores.mean(), 4) if not domain_scores.empty else 0

    # Sub-category
    for sub_cat in SUB_CATEGORIES:
        sub_cat_scores = df[df["sub_category"] == sub_cat]["score"]
        result["sub_category"][sub_cat] = round(sub_cat_scores.mean(), 4) if not sub_cat_scores.empty else 0

    return result
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def get_dimention_rating_mcq_grouding(data_path):
    """Aggregate MCQ + grounding results into per-metric ratings.

    Computes the base metrics ``long_acc``, ``clue_acc`` and ``miou`` as mean
    scores, plus the composite metrics ``CRR`` (clue-referenced ratio),
    ``acc@iou`` and ``rec@iou`` (averaged over IoU thresholds 0.1–0.5),
    each broken down overall / by duration / by domain / by sub-category.

    :param data_path: path to a results table with columns ``score``,
        ``task_mode``, ``qid``, ``duration`` (seconds), ``domain``,
        ``sub_category``
    :return: nested dict {metric: {overall, duration, domain, sub_category}}
    """

    # Load per-question results.
    df = load(data_path)

    # df.loc[(df['task_mode'] == 'miou') & (df['score'] == -1), 'score'] = 0

    # Drop rows whose score could not be determined (-1 sentinel).
    df = df[df["score"] != -1]

    # Convert seconds to minutes and bucket into the DURATIONS ranges.
    df["duration_minutes"] = df["duration"] / 60
    df["duration_range"] = pd.cut(
        df["duration_minutes"], bins=[-np.inf, 10, 20, 30, 40, 50, 60, np.inf], labels=DURATIONS
    )

    # Result skeleton: one sub-dict per metric.
    result = {
        metric: {
            "overall": 0,
            "duration": {k: 0 for k in DURATIONS},
            "domain": {k: 0 for k in DOMAINS},
            "sub_category": {k: 0 for k in SUB_CATEGORIES},
        }
        for metric in ["long_acc", "clue_acc", "miou", "CRR", "acc@iou", "rec@iou"]
    }

    # Base metrics: simple mean score per task_mode.
    for metric in ["long_acc", "clue_acc", "miou"]:
        metric_df = df[df["task_mode"] == metric]

        # Overall
        result[metric]["overall"] = round(metric_df["score"].mean(), 4)

        # Duration
        for dur in DURATIONS:
            dur_scores = metric_df[metric_df["duration_range"] == dur]["score"]
            result[metric]["duration"][dur] = round(dur_scores.mean(), 4) if not dur_scores.empty else 0

        # Domain
        for domain in DOMAINS:
            domain_scores = metric_df[metric_df["domain"] == domain]["score"]
            result[metric]["domain"][domain] = round(domain_scores.mean(), 4) if not domain_scores.empty else 0

        # Sub-category
        for sub_cat in SUB_CATEGORIES:
            sub_cat_scores = metric_df[metric_df["sub_category"] == sub_cat]["score"]
            result[metric]["sub_category"][sub_cat] = round(sub_cat_scores.mean(), 4) if not sub_cat_scores.empty else 0

    # Composite metric CRR = min(long_acc, clue_acc) / clue_acc.
    def calculate_crr(scores):
        long_acc = scores[scores["task_mode"] == "long_acc"]["score"].mean()
        clue_acc = scores[scores["task_mode"] == "clue_acc"]["score"].mean()
        return round(min(long_acc, clue_acc) / clue_acc, 4) if clue_acc != 0 else 0

    # Overall CRR
    result["CRR"]["overall"] = calculate_crr(df)

    # Duration CRR
    for dur in DURATIONS:
        dur_df = df[df["duration_range"] == dur]
        result["CRR"]["duration"][dur] = calculate_crr(dur_df)

    # Domain CRR
    for domain in DOMAINS:
        domain_df = df[df["domain"] == domain]
        result["CRR"]["domain"][domain] = calculate_crr(domain_df)

    # Sub-category CRR
    for sub_cat in SUB_CATEGORIES:
        sub_cat_df = df[df["sub_category"] == sub_cat]
        result["CRR"]["sub_category"][sub_cat] = calculate_crr(sub_cat_df)

    # acc@iou at one threshold: fraction of questions (having both a miou and
    # a long_acc row) whose miou > threshold AND long_acc == 1.
    def calculate_acc_at_iou_threshold(scores, threshold):

        miou_qids = set(scores[scores["task_mode"] == "miou"]["qid"])

        long_acc_qids = set(scores[scores["task_mode"] == "long_acc"]["qid"])

        valid_qids = miou_qids & long_acc_qids

        miou_positive = set(scores[(scores["task_mode"] == "miou") & (scores["score"] > threshold)]["qid"])

        long_acc_positive = scores[
            (scores["task_mode"] == "long_acc") & (scores["qid"].isin(miou_positive)) & (scores["score"] == 1)
        ]

        acc_at_iou_threshold = len(long_acc_positive) / len(valid_qids) if len(valid_qids) > 0 else 0
        return round(acc_at_iou_threshold, 4)

    # acc@iou: mean over the 0.1–0.5 threshold sweep.
    def calculate_acc_at_iou(scores):
        thresholds = [0.1, 0.2, 0.3, 0.4, 0.5]
        acc_at_iou_values = [calculate_acc_at_iou_threshold(scores, threshold) for threshold in thresholds]

        return round(sum(acc_at_iou_values) / len(acc_at_iou_values), 4)

    # Overall acc@iou
    result["acc@iou"]["overall"] = calculate_acc_at_iou(df)

    # Duration acc@iou
    for dur in DURATIONS:
        dur_df = df[df["duration_range"] == dur]
        result["acc@iou"]["duration"][dur] = calculate_acc_at_iou(dur_df)

    # Domain acc@iou
    for domain in DOMAINS:
        domain_df = df[df["domain"] == domain]
        result["acc@iou"]["domain"][domain] = calculate_acc_at_iou(domain_df)

    # Sub-category acc@iou
    for sub_cat in SUB_CATEGORIES:
        sub_cat_df = df[df["sub_category"] == sub_cat]
        result["acc@iou"]["sub_category"][sub_cat] = calculate_acc_at_iou(sub_cat_df)

    # rec@iou at one threshold: fraction of miou rows whose score > threshold.
    def calculate_rec_at_iou_threshold(scores, threshold):
        # All rows of the miou task.
        miou_scores = scores[scores["task_mode"] == "miou"]

        # Rows whose miou score exceeds the threshold.
        miou_positive = miou_scores[miou_scores["score"] > threshold]

        # Ratio of positives to the total.
        rec_at_iou = len(miou_positive) / len(miou_scores) if len(miou_scores) > 0 else 0

        return round(rec_at_iou, 4)

    # rec@iou: mean over the 0.1–0.5 threshold sweep.
    def calculate_rec_at_iou(scores):
        thresholds = [0.1, 0.2, 0.3, 0.4, 0.5]
        rec_at_iou_values = [calculate_rec_at_iou_threshold(scores, threshold) for threshold in thresholds]

        return round(sum(rec_at_iou_values) / len(rec_at_iou_values), 4)

    # Overall rec@iou
    result["rec@iou"]["overall"] = calculate_rec_at_iou(df)

    # Duration rec@iou
    for dur in DURATIONS:
        dur_df = df[df["duration_range"] == dur]
        result["rec@iou"]["duration"][dur] = calculate_rec_at_iou(dur_df)

    # Domain rec@iou
    for domain in DOMAINS:
        domain_df = df[df["domain"] == domain]
        result["rec@iou"]["domain"][domain] = calculate_rec_at_iou(domain_df)

    # Sub-category rec@iou
    for sub_cat in SUB_CATEGORIES:
        sub_cat_df = df[df["sub_category"] == sub_cat]
        result["rec@iou"]["sub_category"][sub_cat] = calculate_rec_at_iou(sub_cat_df)

    return result
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
def milliseconds_to_seconds(milliseconds):
    """Convert a duration in milliseconds to seconds."""
    return milliseconds / 1000
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
def sample_frames_clue_average(clues_time_intervals, frame_num, fps):
    """Sample ~``frame_num`` frame indices across clue intervals.

    Each [start, end] time interval (seconds) is mapped to a frame span via
    ``fps``; frames are allotted proportionally to span length (at least one
    per span) and placed at segment midpoints. When ``frame_num`` covers the
    whole span budget, every frame inside the spans is returned instead.
    """
    # Map time intervals (seconds) to frame spans.
    frame_spans = [(round(start * fps), round(end * fps)) for start, end in clues_time_intervals]
    span_lengths = [end - start for start, end in frame_spans]
    total_frames = sum(span_lengths)

    # Budget covers everything: return every frame inside the clue spans.
    if frame_num >= total_frames:
        return [idx for start, end in frame_spans for idx in range(start, end)]

    # Proportional allocation per span.
    quotas = [int(frame_num * (length / total_frames)) for length in span_lengths]

    indices = []
    for (start, end), quota in zip(frame_spans, quotas):
        quota = max(1, quota)  # every clue contributes at least one frame
        seg = (end - start) / quota
        # Pick the midpoint of each equal-sized segment.
        indices.extend(int(start + seg / 2 + seg * k) for k in range(quota))
    return indices
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
def merge_intervals(intervals):
    """Merge overlapping ``[start, end]`` intervals.

    Note: sorts *intervals* in place and the merged entries alias (and may
    mutate) the input's inner lists — matching the original behavior.
    """
    if not intervals:
        return []

    # In-place sort by interval start.
    intervals.sort(key=lambda iv: iv[0])

    merged = [intervals[0]]
    for cur in intervals[1:]:
        prev = merged[-1]
        if cur[0] <= prev[1]:
            # Overlap: widen the previous interval in place.
            prev[1] = max(prev[1], cur[1])
        else:
            # Disjoint: start a new merged interval.
            merged.append(cur)
    return merged
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
def calculate_intervals_iou(intervals1, intervals2):
    """IoU between two lists of ``[start, end]`` intervals.

    Both lists are first merged (which sorts them in place via
    ``merge_intervals``); IoU = intersection length / union length,
    0 when the union is empty.
    """
    merged1 = merge_intervals(intervals1)
    merged2 = merge_intervals(intervals2)

    # Total covered length of each merged list.
    length1 = sum(end - start for start, end in merged1)
    length2 = sum(end - start for start, end in merged2)

    # Pairwise overlap between the two merged lists.
    intersection = 0
    for s1, e1 in merged1:
        for s2, e2 in merged2:
            intersection += max(0, min(e1, e2) - max(s1, s2))

    union = length1 + length2 - intersection
    return intersection / union if union > 0 else 0
|
| 360 |
+
|
| 361 |
+
|
| 362 |
+
def post_process(response, right_answer, task_mode, duration):
    """Parse a judge/model response into a score for MCQ or grounding tasks.

    First tries to read a ```json fenced block and its "result" field; on
    failure falls back to regex heuristics (capital letter A-H for MCQ,
    number pairs for miou). Returns 1/0 accuracy for "long_acc"/"clue_acc",
    an interval IoU for "miou", or -1 when nothing could be parsed.

    :param response: raw model output text
    :param right_answer: ground truth (letter, or stringified interval list)
    :param task_mode: "long_acc", "clue_acc", or "miou"
    :param duration: video duration in seconds, used to de-normalize
        fractional intervals
    """
    result = -1

    if response:
        # Locate the ```json ... ``` fenced block.
        json_start = response.find("```json")
        json_end = response.find("```", json_start + len("```json"))

        # Extract the JSON payload, if a complete fence was found.
        if json_start != -1 and json_end != -1:
            json_content = response[json_start + len("```json"):json_end].strip()
        else:
            json_content = ""

        if json_content:
            if task_mode in ["long_acc", "clue_acc"]:
                # Quote bare identifiers after ':' so e.g. {"result": A}
                # becomes valid JSON.
                json_content = re.sub(r"(?<=:\s)([A-Za-z_]\w*)", r'"\1"', json_content)

            try:
                model_result = json.loads(json_content)["result"]

                if task_mode in ["long_acc", "clue_acc"]:
                    result = 1 if right_answer == model_result else 0
                elif task_mode == "miou":
                    if not isinstance(model_result, list):
                        return -1
                    # Normalize a single [s, e] pair into [[s, e]].
                    if not isinstance(model_result[0], list):
                        model_result = [model_result]

                    # All bounds <= 1 → treat as fractions of the duration.
                    need_duration = all(interval[0] <= 1 and interval[1] <= 1 for interval in model_result)

                    if need_duration:
                        model_result = [[interval[0] * duration, interval[1] * duration] for interval in model_result]

                    # SECURITY NOTE: eval() on the ground-truth string — the
                    # annotation data is trusted here, but ast.literal_eval
                    # would be safer. Do not feed untrusted input through this.
                    right_answer = eval(right_answer)

                    result = calculate_intervals_iou(right_answer, model_result)

            except Exception as e:
                print(f"Error in parsing JSON: {e}, {json_content}")

    if result == -1:
        if task_mode in ["long_acc", "clue_acc"]:
            # Fallback: any standalone capital letter A-H counts as the
            # model's chosen option.
            matches = re.findall(r"\b[A-H]\b", response)
            if matches:
                result = 1 if right_answer in matches else 0
        elif task_mode == "miou":
            # Fallback: extract all real numbers and pair them into intervals.
            numbers = re.findall(r"-?\d+\.?\d*", response)
            if len(numbers) < 2:
                result = -1
            else:
                # Drop a trailing unpaired number.
                if len(numbers) % 2 != 0:
                    numbers = numbers[:-1]
                model_result = [[float(numbers[i]), float(numbers[i + 1])] for i in range(0, len(numbers), 2)]

                if type(right_answer) is str:
                    right_answer = eval(right_answer)  # see security note above

                result = calculate_intervals_iou(right_answer, model_result)

    return result
|
| 425 |
+
|
| 426 |
+
|
| 427 |
+
def get_timestampes(frame_indices, fps):
    """Render sampled frame indices as a human-readable timestamp summary.

    Each index is converted to seconds (rounded to 4 decimals) via ``fps``.
    """
    stamps = ", ".join(str(round(idx / fps, 4)) for idx in frame_indices)
    return "A total of {frame_num} frames are sampled. Their corresponding timestamps are:\n\n{timestamps}\n\n".format(
        frame_num=len(frame_indices), timestamps=stamps
    )
|
| 433 |
+
|
| 434 |
+
|
| 435 |
+
def post_process_open(response):
    """Extract the "result" field from a ```json fenced block in *response*.

    Falls back to returning the raw response when no parseable JSON block is
    found (or when the API failed).
    """
    parsed = -1

    if response and response != FAIL_MSG:
        # Locate the ```json ... ``` fence.
        start = response.find("```json")
        end = response.find("```", start + len("```json"))

        if start != -1 and end != -1:
            payload = response[start + len("```json"):end].strip()
        else:
            payload = ""

        if payload:
            try:
                parsed = json.loads(payload)["result"]
            except Exception as e:
                print(f"Error in parsing JSON: {e}, {payload}")

    # Nothing parsed: hand back the raw text.
    if parsed == -1:
        parsed = response

    return parsed
|
| 458 |
+
|
| 459 |
+
|
| 460 |
+
def post_process_eval_open(response, step):
    """Parse a judge response into an integer verdict.

    Tries the ```json fenced block first; on a JSON parse error returns -1
    immediately (no regex fallback — unlike the no-JSON case below; this
    asymmetry is preserved from the original). Otherwise falls back to the
    first bare digit: 0/1/2 for step 1, 0/1 for step 2.

    NOTE(review): if ``response`` is None the fallback ``re.search`` would
    raise a TypeError — callers appear to pass strings; confirm upstream.

    :param response: raw judge output text
    :param step: 1 (text-only judging) or 2 (vision-assisted judging)
    :return: parsed verdict, or -1 when unparseable
    """

    model_result = -1

    if response and response != FAIL_MSG:

        # Locate the ```json ... ``` fence.
        json_start = response.find("```json")
        json_end = response.find("```", json_start + len("```json"))

        if json_start != -1 and json_end != -1:
            json_content = response[json_start + len("```json"):json_end].strip()
        else:
            json_content = ""

        if json_content:
            try:
                model_result = json.loads(json_content)["result"]
            except Exception as e:
                print(f"Error in parsing JSON: {e}, {json_content}")
                # Early return: malformed JSON skips the regex fallback.
                return -1
    if model_result == -1:
        if step == 1:
            # Step 1 verdicts are 0, 1, or 2.
            match = re.search(r"[012]", response)
            if match:
                model_result = int(match.group())
        else:
            # Step 2 verdicts are 0 or 1.
            match = re.search(r"[01]", response)
            if match:
                model_result = int(match.group())

    return model_result
|
| 491 |
+
|
| 492 |
+
|
| 493 |
+
def eval_open_first(model, line):
    """Build the step-1 open-ended judging prompt and query the judge model.

    The system prompt (question + ground truth + prediction comparison) is
    assumed to be configured on *model* separately.
    """
    prompt = (
        f"Question: {line['question']}\n\n"
        f"The ground truth answer is '{line['answer']}'\n\n"
        f"The model's prediction is '{line['model_result']}'\n\n"
    )
    return model.generate(prompt)
|
| 506 |
+
|
| 507 |
+
|
| 508 |
+
def save_step_1_steps(data, step_1_results):
    """Attach parsed step-1 judgments to *data* (mutated and returned).

    Rows whose step-1 verdict is already decisive (-1, 0, or 1) have their
    step-2 result and final score filled in directly; verdict 2 rows are left
    for the vision-assisted second pass.
    """
    # Parse every raw step-1 response keyed by qid.
    data["step_1_result"] = data["qid"].map(lambda qid: post_process_eval_open(step_1_results[qid], 1))

    # Decided rows short-circuit step 2.
    decided = data["step_1_result"].isin([-1, 0, 1])
    data.loc[decided, "step_2_result"] = data.loc[decided, "step_1_result"]
    data.loc[decided, "score"] = data.loc[decided, "step_1_result"]

    return data
|
| 519 |
+
|
| 520 |
+
|
| 521 |
+
def eval_open_second(model, line, frame_paths):
    """Build the step-2 prompt and query the judge with clue-interval frames.

    The prompt carries the question and the model's prediction; the sampled
    frame images are appended to the message.
    """
    prompt = (
        f"Question: {line['question']}\n\n"
        f"The model's prediction is '{line['model_result']}'\n\n"
    )
    return model.generate([prompt] + frame_paths)
|
| 532 |
+
|
| 533 |
+
|
| 534 |
+
def save_step_2_steps(data, step_1_results):
    """Attach parsed step-2 judgments to *data* as the final score (mutated and returned)."""
    data["score"] = data["qid"].map(lambda qid: post_process_eval_open(step_1_results[qid], 2))
    return data
|
| 540 |
+
|
| 541 |
+
|
| 542 |
+
def clue_frame_paths(clue_frame_root, qid, num_frames=8):
    """Return the cache paths for *qid*'s clue frames, creating the directory.

    Paths follow the module-level ``frame_tmpl`` naming (1-based index).
    """
    root = osp.join(clue_frame_root, str(qid))
    os.makedirs(root, exist_ok=True)
    return [osp.join(root, frame_tmpl.format(idx, num_frames)) for idx in range(1, num_frames + 1)]
|
| 546 |
+
|
| 547 |
+
|
| 548 |
+
def save_clue_video_frames(data_root, clue_frame_root, video, uid, clue_intervals=None, num_frames=8, fps=-1):
    """Sample frames from the clue intervals of *video* and cache them as JPEGs.

    Merges overlapping clue intervals, samples ``num_frames`` indices
    proportionally across them (only the ``num_frames > 0 and fps < 0`` mode
    is implemented), and writes any missing frames under a file lock so
    concurrent workers do not clobber each other.

    Fix: the original guard was ``if type(uid) is str: uid = str(uid)`` — a
    no-op that converted strings to strings; the intent was to normalize
    non-string ids for path joining, done below.

    :return: (frame_paths, frame_indices, video_fps) when sampling ran.
        NOTE(review): implicitly returns None when ``clue_intervals`` is None
        or the (num_frames, fps) mode is unsupported — confirm callers always
        take the implemented branch.
    """
    # BUG FIX: normalize non-str ids (original converted only when already str).
    if not isinstance(uid, str):
        uid = str(uid)

    vid_path = osp.join(data_root, video)
    import decord
    vid = decord.VideoReader(vid_path)
    vid_fps = vid.get_avg_fps()

    if clue_intervals is not None:
        # 1. Merge overlapping clue intervals.
        merged_intervals = merge_intervals(clue_intervals)

        if num_frames > 0 and fps < 0:
            # 2. Sample evenly across the merged clue intervals.
            indices = sample_frames_clue_average(merged_intervals, num_frames, vid_fps)
            frame_paths = clue_frame_paths(clue_frame_root, uid, len(indices))

            # Write only the frames that are not cached yet, under a lock so
            # concurrent workers do not race on the same video.
            if not np.all([osp.exists(p) for p in frame_paths]):
                lock_path = osp.splitext(vid_path)[0] + '.lock'
                with portalocker.Lock(lock_path, 'w', timeout=30):
                    # Re-check after acquiring the lock.
                    if not np.all([osp.exists(p) for p in frame_paths]):
                        images = [Image.fromarray(vid[i].asnumpy()) for i in indices]
                        for im, pth in zip(images, frame_paths):
                            if not osp.exists(pth):
                                im.save(pth)

            return frame_paths, indices, vid_fps
|
| 580 |
+
|
| 581 |
+
|
| 582 |
+
def get_chunk_number(filename):
    """Extract the integer chunk index from a name like ``video_chunk_3.zip``.

    Returns ``float('inf')`` for names without a numeric ``chunk_`` marker,
    so such files sort after all numbered chunks when used as a sort key.
    """
    try:
        num = filename.split("chunk_")[1].split(".zip")[0]
        return int(num)
    except (IndexError, ValueError):
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit): IndexError covers a missing
        # "chunk_" marker, ValueError a non-numeric suffix.
        return float('inf')
|
| 588 |
+
|
| 589 |
+
|
| 590 |
+
def unzip_hf_zip(target_dir):
    """Unpack the downloaded CG-Bench archives found under *target_dir*.

    Extracts ``video_chunk_*.zip`` into ``cg_videos_720p``,
    ``clue_video_chunk_*.zip`` into ``cg_clue_videos`` and ``subtitles.zip``
    into ``cg_subtitles``. If all three output directories already exist the
    function assumes extraction is complete and returns early.
    """
    target_dir = Path(target_dir)

    videos_dir = target_dir / "cg_videos_720p"
    clue_videos_dir = target_dir / "cg_clue_videos"
    subtitles_dir = target_dir / "cg_subtitles"

    # Presence of all three dirs is treated as "already extracted".
    if videos_dir.exists() and clue_videos_dir.exists() and subtitles_dir.exists():
        print("all target dirs exist, skip.")
        return

    videos_dir.mkdir(parents=True, exist_ok=True)
    clue_videos_dir.mkdir(parents=True, exist_ok=True)
    subtitles_dir.mkdir(parents=True, exist_ok=True)

    video_zips = sorted(target_dir.glob("video_chunk_*.zip"))
    for zip_path in tqdm(video_zips, desc="unzip videos"):
        with zipfile.ZipFile(zip_path, "r") as zip_ref:
            zip_ref.extractall(videos_dir)

    clue_zips = sorted(target_dir.glob("clue_video_chunk_*.zip"))
    for zip_path in tqdm(clue_zips, desc="unzip clue videos"):
        with zipfile.ZipFile(zip_path, "r") as zip_ref:
            zip_ref.extractall(clue_videos_dir)

    # Subtitles are extracted file-by-file so tqdm can show progress.
    subtitles_zip = target_dir / "subtitles.zip"
    with zipfile.ZipFile(subtitles_zip, "r") as zip_ref:
        for file in tqdm(zip_ref.namelist(), desc="unzip subtitles"):
            zip_ref.extract(file, subtitles_dir)

    # Typo fix: was "sucessfully".
    print("successfully unzip all files.")
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/eval_configs/__init__.py
ADDED
|
File without changes
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/eval_configs/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (178 Bytes). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/eval_configs/global_config.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import subprocess
|
| 2 |
+
# Module-level accumulators shared between the instrumented chart scripts
# and the evaluators: prefix code appends entries via the add_* helpers
# below, evaluators read them back via the get_* helpers.
texts = []
images = []
markers = []
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def reset_texts():
    """Empty the shared ``texts`` accumulator in place (same list object)."""
    del texts[:]
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def add_text(text):
    """Append one captured text entry to the shared ``texts`` accumulator."""
    texts.append(text)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def get_raw_texts():
    """Return only the raw string field (index 2) of every logged text entry."""
    raw_strings = []
    for entry in texts:
        raw_strings.append(entry[2])
    return raw_strings
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def get_texts():
    """Return the shared ``texts`` accumulator itself (not a copy)."""
    return texts
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def reset_images():
    """Empty the shared ``images`` accumulator in place (same list object)."""
    del images[:]
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def add_image(image):
    """Append one captured image entry to the shared ``images`` accumulator."""
    images.append(image)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def get_images():
    """Return the shared ``images`` accumulator itself (not a copy)."""
    return images
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def reset_markers():
    """Empty the shared ``markers`` accumulator in place (same list object)."""
    del markers[:]
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def add_marker(marker):
    """Append one captured marker entry to the shared ``markers`` accumulator."""
    markers.append(marker)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def get_markers():
    """Return the shared ``markers`` accumulator itself (not a copy)."""
    return markers
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def run_script_safe(script_path):
    """Run *script_path* as a child Python process; return True on success.

    On a non-zero exit the child's return code, stdout and stderr are
    printed so the generated script can be diagnosed, and False is returned
    instead of raising.
    """
    import sys  # local import: keeps this module's import surface unchanged
    try:
        subprocess.run(
            # Use the current interpreter instead of a hard-coded "python3":
            # correct inside virtualenvs and on platforms without a
            # "python3" launcher.
            [sys.executable, script_path],
            check=True,
            capture_output=True,
            text=True
        )
        return True  # success
    except subprocess.CalledProcessError as e:
        print(f"[ERROR] Failed to run {script_path}")
        print(f"[Return Code]: {e.returncode}")
        print(f"[Stdout]:\n{e.stdout}")
        print(f"[Stderr]:\n{e.stderr}")
        return False  # failed
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (175 Bytes). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/__pycache__/chart_type_evaluator.cpython-310.pyc
ADDED
|
Binary file (3.34 kB). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/__pycache__/color_evaluator.cpython-310.pyc
ADDED
|
Binary file (6.49 kB). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/__pycache__/layout_evaluator.cpython-310.pyc
ADDED
|
Binary file (4.64 kB). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/chart_type_evaluator_prefix.py
ADDED
|
@@ -0,0 +1,372 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import sys
|
| 3 |
+
# sys.path.insert(0, os.environ['PROJECT_PATH'])
|
| 4 |
+
if os.environ["VLMEVAL_CHARTMIMIC_UTILS_PATH"] not in sys.path:
|
| 5 |
+
sys.path.insert(0, os.environ["VLMEVAL_CHARTMIMIC_UTILS_PATH"])
|
| 6 |
+
|
| 7 |
+
from matplotlib.projections.polar import PolarAxes
|
| 8 |
+
import matplotlib.pyplot as plt
|
| 9 |
+
from matplotlib.axes import Axes
|
| 10 |
+
from matplotlib.axes._base import _process_plot_var_args
|
| 11 |
+
import squarify
|
| 12 |
+
import networkx.drawing.nx_pylab as nx_pylab
|
| 13 |
+
import networkx as nx
|
| 14 |
+
from mpl_toolkits.mplot3d import Axes3D
|
| 15 |
+
from matplotlib.image import NonUniformImage
|
| 16 |
+
from matplotlib.patches import Ellipse, Circle
|
| 17 |
+
from matplotlib.tri._tripcolor import tripcolor
|
| 18 |
+
from matplotlib_venn._common import VennDiagram
|
| 19 |
+
import inspect
|
| 20 |
+
import warnings
|
| 21 |
+
# Silence noisy warning categories emitted while re-executing user chart
# code through the instrumented matplotlib entry points below.
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
|
| 24 |
+
|
| 25 |
+
# sys.path.insert(0, os.environ['PROJECT_PATH'])
|
| 26 |
+
|
| 27 |
+
# Map "<defining source file>-<function name>" -> call count, filled in by
# the log_function* decorators below and read by the chart-type evaluator.
called_functions = {}
# Re-entrancy guard: True while already inside a logged plotting call so
# that nested plotting calls are not double-counted.
in_decorator = False
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def log_function_specific_for_draw_networkx_labels(func):
    """Wrap ``draw_networkx_labels`` so each top-level call is counted.

    The full networkx keyword signature is mirrored explicitly so default
    values stay identical for instrumented code. A call is recorded in
    ``called_functions`` only when it is not nested inside another logged
    plotting call (module-level ``in_decorator`` guard).
    """
    def wrapper(
        G,
        pos,
        labels=None,
        font_size=12,
        font_color="k",
        font_family="sans-serif",
        font_weight="normal",
        alpha=None,
        bbox=None,
        horizontalalignment="center",
        verticalalignment="center",
        ax=None,
        clip_on=True,
    ):
        # NOTE(review): ``drawed_colors`` is never assigned in this module;
        # the global statement looks like a leftover from the color
        # evaluator's variant of this decorator.
        global drawed_colors
        global in_decorator

        if not in_decorator:
            in_decorator = True

            # NOTE(review): unlike ``log_function`` this key repeats the
            # function name ("<file>/<name>-<name>"); generation and golden
            # runs use the same scheme, so comparisons still line up.
            file_name = inspect.getfile(func) + "/" + func.__name__
            name = file_name + "-" + func.__name__
            called_functions[name] = called_functions.get(name, 0) + 1

            result = func(
                G,
                pos,
                labels=labels,
                font_size=font_size,
                font_color=font_color,
                font_family=font_family,
                font_weight=font_weight,
                alpha=alpha,
                bbox=bbox,
                horizontalalignment=horizontalalignment,
                verticalalignment=verticalalignment,
                ax=ax,
                clip_on=clip_on
            )

            in_decorator = False
        else:
            # Nested call: delegate without counting.
            return func(
                G,
                pos,
                labels=labels,
                font_size=font_size,
                font_color=font_color,
                font_family=font_family,
                font_weight=font_weight,
                alpha=alpha,
                bbox=bbox,
                horizontalalignment=horizontalalignment,
                verticalalignment=verticalalignment,
                ax=ax,
                clip_on=clip_on
            )
        return result
    wrapper.__name__ = func.__name__
    return wrapper
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def log_function_specific_for_draw_networkx_edges(func):
    """Wrap ``draw_networkx_edges`` so each top-level call is counted.

    Mirrors the full networkx keyword signature so defaults stay identical
    for instrumented code; counting is suppressed for nested plotting calls
    via the module-level ``in_decorator`` guard.
    """
    def wrapper(
        G,
        pos,
        edgelist=None,
        width=1.0,
        edge_color="k",
        style="solid",
        alpha=None,
        arrowstyle=None,
        arrowsize=10,
        edge_cmap=None,
        edge_vmin=None,
        edge_vmax=None,
        ax=None,
        arrows=None,
        label=None,
        node_size=300,
        nodelist=None,
        node_shape="o",
        connectionstyle="arc3",
        min_source_margin=0,
        min_target_margin=0,
    ):
        # NOTE(review): ``drawed_colors`` is never assigned in this module;
        # leftover from the color-evaluator variant of this decorator.
        global drawed_colors
        global in_decorator

        if not in_decorator:
            in_decorator = True

            # Key repeats the function name ("<file>/<name>-<name>"); both
            # generation and golden runs use the same scheme.
            file_name = inspect.getfile(func) + "/" + func.__name__
            name = file_name + "-" + func.__name__
            called_functions[name] = called_functions.get(name, 0) + 1

            result = func(
                G,
                pos,
                edgelist=edgelist,
                width=width,
                edge_color=edge_color,
                style=style,
                alpha=alpha,
                arrowstyle=arrowstyle,
                arrowsize=arrowsize,
                edge_cmap=edge_cmap,
                edge_vmin=edge_vmin,
                edge_vmax=edge_vmax,
                ax=ax,
                arrows=arrows,
                label=label,
                node_size=node_size,
                nodelist=nodelist,
                node_shape=node_shape,
                connectionstyle=connectionstyle,
                min_source_margin=min_source_margin,
                min_target_margin=min_target_margin
            )

            in_decorator = False
        else:
            # Nested call: delegate without counting.
            return func(
                G,
                pos,
                edgelist=edgelist,
                width=width,
                edge_color=edge_color,
                style=style,
                alpha=alpha,
                arrowstyle=arrowstyle,
                arrowsize=arrowsize,
                edge_cmap=edge_cmap,
                edge_vmin=edge_vmin,
                edge_vmax=edge_vmax,
                ax=ax,
                arrows=arrows,
                label=label,
                node_size=node_size,
                nodelist=nodelist,
                node_shape=node_shape,
                connectionstyle=connectionstyle,
                min_source_margin=min_source_margin,
                min_target_margin=min_target_margin
            )
        return result
    wrapper.__name__ = func.__name__
    return wrapper
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
def log_function_specific_for_draw_networkx_nodes(func):
    """Wrap ``draw_networkx_nodes`` so each top-level call is counted.

    Mirrors the full networkx keyword signature so defaults stay identical
    for instrumented code; counting is suppressed for nested plotting calls
    via the module-level ``in_decorator`` guard.
    """
    def wrapper(
        G,
        pos,
        nodelist=None,
        node_size=300,
        node_color="#1f78b4",
        node_shape="o",
        alpha=None,
        cmap=None,
        vmin=None,
        vmax=None,
        ax=None,
        linewidths=None,
        edgecolors=None,
        label=None,
        margins=None,
    ):
        # NOTE(review): ``drawed_colors`` is never assigned in this module;
        # leftover from the color-evaluator variant of this decorator.
        global drawed_colors
        global in_decorator

        if not in_decorator:
            in_decorator = True

            # Key repeats the function name ("<file>/<name>-<name>"); both
            # generation and golden runs use the same scheme.
            file_name = inspect.getfile(func) + "/" + func.__name__
            name = file_name + "-" + func.__name__
            called_functions[name] = called_functions.get(name, 0) + 1

            result = func(
                G,
                pos,
                nodelist=nodelist,
                node_size=node_size,
                node_color=node_color,
                node_shape=node_shape,
                alpha=alpha,
                cmap=cmap,
                vmin=vmin,
                vmax=vmax,
                ax=ax,
                linewidths=linewidths,
                edgecolors=edgecolors,
                label=label,
                margins=margins
            )

            in_decorator = False
        else:
            # Nested call: delegate without counting.
            return func(
                G,
                pos,
                nodelist=nodelist,
                node_size=node_size,
                node_color=node_color,
                node_shape=node_shape,
                alpha=alpha,
                cmap=cmap,
                vmin=vmin,
                vmax=vmax,
                ax=ax,
                linewidths=linewidths,
                edgecolors=edgecolors,
                label=label,
                margins=margins
            )
        return result
    wrapper.__name__ = func.__name__
    return wrapper
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
def log_function(func):
    """Decorator: count top-level calls to a plotting function.

    The counter key in ``called_functions`` is
    "<defining file>-<function name>"; ``plot`` on a polar axes gets a
    distinct "_polar"-suffixed key. The module-level ``in_decorator`` flag
    suppresses counting of plotting calls made from inside another logged
    call.
    """
    def wrapper(*args, **kwargs):
        global in_decorator
        if in_decorator:
            # Already inside a logged call: pass straight through.
            return func(*args, **kwargs)
        in_decorator = True
        source_file = inspect.getfile(func)
        if len(args) > 0 and isinstance(
                args[0], PolarAxes) and func.__name__ == "plot":
            # Distinguish polar line plots from cartesian ones.
            source_file = source_file + "_polar"
        key = source_file + "-" + func.__name__
        called_functions[key] = called_functions.get(key, 0) + 1
        output = func(*args, **kwargs)
        in_decorator = False
        return output
    wrapper.__name__ = func.__name__
    return wrapper
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
# ---------------------------------------------------------------------------
# Monkey-patch the plotting entry points so every top-level call made by an
# executed chart script is tallied in ``called_functions``. Importing this
# module therefore mutates matplotlib / networkx / squarify globally.
# ---------------------------------------------------------------------------

# Bar-family charts.
Axes.bar = log_function(Axes.bar)
Axes.barh = log_function(Axes.barh)  # The same as the bar

# Line-family charts.
# _process_plot_var_args._makeline = log_function(_process_plot_var_args._makeline)
Axes.plot = log_function(Axes.plot)  # Special Case for polar plot
Axes.axhline = log_function(Axes.axhline)
Axes.axvline = log_function(Axes.axvline)
Axes.axvspan = log_function(Axes.axvspan)
Axes.axhspan = log_function(Axes.axhspan)
Axes.hlines = log_function(Axes.hlines)
Axes.vlines = log_function(Axes.vlines)

Axes.errorbar = log_function(Axes.errorbar)  # The same as the line

Axes.boxplot = log_function(Axes.boxplot)

Axes.violinplot = log_function(Axes.violinplot)
Axes.violin = log_function(Axes.violin)

Axes.hist = log_function(Axes.hist)

# Area / fill charts.
# Axes._fill_between_x_or_y = log_function(Axes._fill_between_x_or_y)
Axes.fill_between = log_function(Axes.fill_between)
Axes.fill_betweenx = log_function(Axes.fill_betweenx)

Axes.scatter = log_function(Axes.scatter)

# Graph drawing: patch both the nx_pylab module and the nx re-exports,
# since instrumented code may call either binding.
nx_pylab.draw_networkx_nodes = log_function_specific_for_draw_networkx_nodes(
    nx_pylab.draw_networkx_nodes)
nx_pylab.draw_networkx_edges = log_function_specific_for_draw_networkx_edges(
    nx_pylab.draw_networkx_edges)
nx_pylab.draw_networkx_labels = log_function_specific_for_draw_networkx_labels(
    nx_pylab.draw_networkx_labels)

# nx_pylab.draw_networkx_nodes = log_function_specific_for_draw_networkx_nodes(nx_pylab.draw_networkx_nodes)
# nx_pylab.draw_networkx_edges = log_function_specific_for_draw_networkx_edges(nx_pylab.draw_networkx_edges)
# nx_pylab.draw_networkx_labels = log_function_specific_for_draw_networkx_labels(nx_pylab.draw_networkx_labels)

nx.draw_networkx_nodes = log_function_specific_for_draw_networkx_nodes(
    nx.draw_networkx_nodes)
nx.draw_networkx_edges = log_function_specific_for_draw_networkx_edges(
    nx.draw_networkx_edges)
nx.draw_networkx_labels = log_function_specific_for_draw_networkx_labels(
    nx.draw_networkx_labels)

Axes.quiver = log_function(Axes.quiver)

# 3D charts.
Axes3D.scatter = log_function(Axes3D.scatter)
Axes3D.plot = log_function(Axes3D.plot)
Axes3D.plot_surface = log_function(Axes3D.plot_surface)
Axes3D.bar3d = log_function(Axes3D.bar3d)
Axes3D.bar = log_function(Axes3D.bar)
Axes3D.add_collection3d = log_function(Axes3D.add_collection3d)

Axes.pie = log_function(Axes.pie)

Axes.fill = log_function(Axes.fill)

# Treemaps.
squarify.plot = log_function(squarify.plot)

# Heatmap-like charts.
Axes.imshow = log_function(Axes.imshow)
Axes.pcolor = log_function(Axes.pcolor)
NonUniformImage.__init__ = log_function(NonUniformImage.__init__)

Axes.contour = log_function(Axes.contour)
Axes.contourf = log_function(Axes.contourf)

Ellipse.__init__ = log_function(Ellipse.__init__)
Axes.broken_barh = log_function(Axes.broken_barh)

Axes.tripcolor = log_function(Axes.tripcolor)

VennDiagram.__init__ = log_function(VennDiagram.__init__)

Circle.__init__ = log_function(Circle.__init__)

# Axes.plot = log_function(Axes.plot)
# Axes.loglog = log_function(Axes.loglog)
# Axes.scatter = log_function(Axes.scatter)
# Axes.bar = log_function(Axes.bar)
# Axes.barh = log_function(Axes.barh)
# Axes.axhline = log_function(Axes.axhline)
# Axes.axvline = log_function(Axes.axvline)
# Axes.errorbar = log_function(Axes.errorbar)
# Axes.matshow = log_function(Axes.matshow)
# Axes.hist = log_function(Axes.hist)
# Axes.pie = log_function(Axes.pie)
# Axes.boxplot = log_function(Axes.boxplot)
# Axes.arrow = log_function(Axes.arrow)
# Axes.fill_between = log_function(Axes.fill_between)
# Axes.fill_betweenx = log_function(Axes.fill_betweenx)
# Axes.imshow = log_function(Axes.imshow)
# Axes.contour = log_function(Axes.contour)
# Axes.contourf = log_function(Axes.contourf)
# Axes.violinplot = log_function(Axes.violinplot)
# Axes.violin = log_function(Axes.violin)

# squarify.plot = log_function(squarify.plot)
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/color_evaluator.py
ADDED
|
@@ -0,0 +1,326 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa
|
| 2 |
+
from typing import List, Tuple
|
| 3 |
+
|
| 4 |
+
# from dotenv import load_dotenv
|
| 5 |
+
# load_dotenv()
|
| 6 |
+
|
| 7 |
+
import os
|
| 8 |
+
|
| 9 |
+
# sys.path.insert(0, os.environ["PROJECT_PATH"])
|
| 10 |
+
|
| 11 |
+
from ..eval_configs.global_config import run_script_safe
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# from skimage.color import deltaE_cie76
|
| 15 |
+
# from skimage.color import rgb2lab
|
| 16 |
+
from itertools import permutations
|
| 17 |
+
from multiprocessing import Pool, cpu_count
|
| 18 |
+
from .color_utils import group_color, calculate_similarity_single
|
| 19 |
+
|
| 20 |
+
from multiprocessing import Process
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
# def hex_to_rgb(hex_color):
|
| 24 |
+
# hex_color = hex_color.lstrip('#')
|
| 25 |
+
# return tuple(int(hex_color[i:i+2], 16) / 255.0 for i in (0, 2, 4))
|
| 26 |
+
|
| 27 |
+
# def calculate_similarity_single(c1, c2):
|
| 28 |
+
# c1_file = c1.split("--")[0]
|
| 29 |
+
# c2_file = c2.split("--")[0]
|
| 30 |
+
|
| 31 |
+
# c1_color = c1.split("--")[1]
|
| 32 |
+
# c2_color = c2.split("--")[1]
|
| 33 |
+
|
| 34 |
+
# if c1_file != c2_file:
|
| 35 |
+
# return 0
|
| 36 |
+
# elif c1_color.startswith("#") and c2_color.startswith("#"):
|
| 37 |
+
|
| 38 |
+
# c1_color = rgb2lab(np.array([hex_to_rgb(c1_color)]))
|
| 39 |
+
# c2_color = rgb2lab(np.array([hex_to_rgb(c2_color)]))
|
| 40 |
+
|
| 41 |
+
# return max(0, 1 - deltaE_cie76(c1_color, c2_color)[0] / 100)
|
| 42 |
+
# elif not c1_color.startswith("#") and not c2_color.startswith("#"):
|
| 43 |
+
|
| 44 |
+
# return 1 if c1_color == c2_color else 0
|
| 45 |
+
# else:
|
| 46 |
+
# return 0
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def calculate_similarity_for_permutation(args):
    """Multiprocessing-pool worker: total color similarity of one pairing.

    *args* is a ``(shorter, perm)`` tuple of two equally long color
    sequences; returns the sum of pairwise single-color similarities.
    """
    shorter, perm = args
    total = 0
    for gen_color, gold_color in zip(shorter, perm):
        total += calculate_similarity_single(gen_color, gold_color)
    return total
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class ColorEvaluator:
|
| 58 |
+
|
| 59 |
+
def __init__(self) -> None:
|
| 60 |
+
self.metrics = {
|
| 61 |
+
"precision": 0,
|
| 62 |
+
"recall": 0,
|
| 63 |
+
"f1": 0,
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
def __call__(self, generation_code_file, golden_code_file):
|
| 67 |
+
# print("genearion_code_file", generation_code_file)
|
| 68 |
+
# print("golden_code_file", golden_code_file)
|
| 69 |
+
|
| 70 |
+
self.golden_code_file = golden_code_file
|
| 71 |
+
|
| 72 |
+
# print(f"generation_code_file: {generation_code_file}")
|
| 73 |
+
generation_colors = self._log_colors(generation_code_file)
|
| 74 |
+
# print(f"golden_code_file: {golden_code_file}")
|
| 75 |
+
golden_colors = self._log_colors(golden_code_file)
|
| 76 |
+
# print(f"len(generation_colors): {len(generation_colors)}")
|
| 77 |
+
# print(f"len(golden_colors): {len(golden_colors)}")
|
| 78 |
+
|
| 79 |
+
self._calculate_metrics(generation_colors, golden_colors)
|
| 80 |
+
|
| 81 |
+
# [TAG] What is this for?
|
| 82 |
+
# redunant_file = os.environ["PROJECT_PATH"] + "/" + os.path.basename(golden_code_file).replace(".py", ".pdf")
|
| 83 |
+
# os.remove(redunant_file)
|
| 84 |
+
# print(self.metrics)
|
| 85 |
+
|
| 86 |
+
def _log_colors(self, code_file):
|
| 87 |
+
"""
|
| 88 |
+
Get text objects of the code
|
| 89 |
+
"""
|
| 90 |
+
|
| 91 |
+
with open(code_file, "r") as f:
|
| 92 |
+
lines = f.readlines()
|
| 93 |
+
code = "".join(lines)
|
| 94 |
+
|
| 95 |
+
prefix = self._get_prefix()
|
| 96 |
+
output_file = code_file.replace(".py", "_log_colors.txt")
|
| 97 |
+
suffix = self._get_suffix(output_file)
|
| 98 |
+
code = prefix + code + suffix
|
| 99 |
+
|
| 100 |
+
code_log_texts_file = code_file.replace(".py", "_log_colors.py")
|
| 101 |
+
with open(code_log_texts_file, "w") as f:
|
| 102 |
+
f.write(code)
|
| 103 |
+
|
| 104 |
+
# os.system(f"python3 {code_log_texts_file}")
|
| 105 |
+
success = run_script_safe(code_log_texts_file)
|
| 106 |
+
if not success:
|
| 107 |
+
print("Skip downstream logic due to previous failure.")
|
| 108 |
+
# optionally return default result or continue
|
| 109 |
+
|
| 110 |
+
if os.path.exists(output_file):
|
| 111 |
+
with open(output_file, "r") as f:
|
| 112 |
+
colors = f.read()
|
| 113 |
+
try:
|
| 114 |
+
colors = eval(colors)
|
| 115 |
+
except BaseException:
|
| 116 |
+
colors = []
|
| 117 |
+
os.remove(output_file)
|
| 118 |
+
else:
|
| 119 |
+
colors = []
|
| 120 |
+
|
| 121 |
+
os.remove(code_log_texts_file)
|
| 122 |
+
|
| 123 |
+
# pdf_file = re.findall(r"plt\.savefig\('(.*)'\)", code)
|
| 124 |
+
# if len(pdf_file) != 0:
|
| 125 |
+
# pdf_file = pdf_file[0]
|
| 126 |
+
# if os.path.basename(pdf_file) == pdf_file:
|
| 127 |
+
# os.remove(pdf_file)
|
| 128 |
+
|
| 129 |
+
return colors
|
| 130 |
+
|
| 131 |
+
def _calculate_metrics(
|
| 132 |
+
self, generation_colors: List[Tuple], golden_colors: List[Tuple]
|
| 133 |
+
):
|
| 134 |
+
generation_colors = list(generation_colors)
|
| 135 |
+
golden_colors = list(golden_colors)
|
| 136 |
+
|
| 137 |
+
if len(generation_colors) == 0 or len(golden_colors) == 0:
|
| 138 |
+
self.metrics["precision"] = 0
|
| 139 |
+
self.metrics["recall"] = 0
|
| 140 |
+
self.metrics["f1"] = 0
|
| 141 |
+
return
|
| 142 |
+
|
| 143 |
+
group_generation_colors = group_color(generation_colors)
|
| 144 |
+
group_golden_colors = group_color(golden_colors)
|
| 145 |
+
|
| 146 |
+
# print("group_generation_colors", group_generation_colors)
|
| 147 |
+
# print("group_golden_colors", group_golden_colors)
|
| 148 |
+
|
| 149 |
+
# print("generation_colors", generation_colors)
|
| 150 |
+
# print("golden_colors", golden_colors)
|
| 151 |
+
|
| 152 |
+
def calculate_similarity_serial(lst1, lst2):
|
| 153 |
+
if len(lst1) == 0 or len(lst2) == 0:
|
| 154 |
+
return 0
|
| 155 |
+
|
| 156 |
+
shorter, longer = (lst1, lst2) if len(lst1) <= len(lst2) else (lst2, lst1)
|
| 157 |
+
|
| 158 |
+
max_total_similarity = float("-inf")
|
| 159 |
+
best_index = None
|
| 160 |
+
|
| 161 |
+
for perm in permutations(longer, len(shorter)):
|
| 162 |
+
current_similarity = sum(
|
| 163 |
+
calculate_similarity_single(c1, c2) for c1, c2 in zip(shorter, perm)
|
| 164 |
+
)
|
| 165 |
+
current_similarity /= len(shorter)
|
| 166 |
+
|
| 167 |
+
if current_similarity > max_total_similarity:
|
| 168 |
+
max_total_similarity = current_similarity
|
| 169 |
+
best_index = [shorter, perm]
|
| 170 |
+
|
| 171 |
+
# best_index[0] = sorted(best_index[0])
|
| 172 |
+
# best_index[1] = sorted(best_index[1])
|
| 173 |
+
# print("best_index", best_index)
|
| 174 |
+
for i1, i2 in zip(best_index[0], best_index[1]):
|
| 175 |
+
print(i1, i2)
|
| 176 |
+
tmp_similarity = sum(
|
| 177 |
+
calculate_similarity_single(c1, c2)
|
| 178 |
+
for c1, c2 in zip(best_index[0], best_index[1])
|
| 179 |
+
) / len(shorter)
|
| 180 |
+
print("tmp_similarity", tmp_similarity)
|
| 181 |
+
|
| 182 |
+
return max_total_similarity
|
| 183 |
+
|
| 184 |
+
def calculate_similarity_parallel(lst1, lst2):
|
| 185 |
+
if len(lst1) == 0 or len(lst2) == 0:
|
| 186 |
+
return 0
|
| 187 |
+
|
| 188 |
+
shorter, longer = (lst1, lst2) if len(lst1) <= len(lst2) else (lst2, lst1)
|
| 189 |
+
perms = permutations(longer, len(shorter))
|
| 190 |
+
|
| 191 |
+
# create processes according to the number of CPUs
|
| 192 |
+
with Pool(processes=cpu_count()) as pool:
|
| 193 |
+
similarities = pool.map(
|
| 194 |
+
calculate_similarity_for_permutation,
|
| 195 |
+
[(shorter, perm) for perm in perms],
|
| 196 |
+
)
|
| 197 |
+
|
| 198 |
+
# print("length of similarities", len(similarities))
|
| 199 |
+
|
| 200 |
+
# indexes = [item[0] for item in similarities]
|
| 201 |
+
# similarities = [item[1] for item in similarities]
|
| 202 |
+
|
| 203 |
+
# get max similarity and its index
|
| 204 |
+
# max_total_similarity = max(similarities)
|
| 205 |
+
# max_index = similarities.index(max_total_similarity)
|
| 206 |
+
# index = indexes[max_index]
|
| 207 |
+
|
| 208 |
+
# max_total_similarity = max(similarities)
|
| 209 |
+
# index[0] = sorted(index[0])
|
| 210 |
+
# index[1] = sorted(index[1])
|
| 211 |
+
# for i1, i2 in zip(index[0], index[1]):
|
| 212 |
+
# print(i1, i2)
|
| 213 |
+
|
| 214 |
+
# tmp_similarity = sum( calculate_similarity_single(c1, c2) for c1, c2 in zip(index[0], index[1]) ) / len(shorter)
|
| 215 |
+
# print("tmp_similarity", tmp_similarity)
|
| 216 |
+
# print("best_index", index)
|
| 217 |
+
|
| 218 |
+
return max(similarities)
|
| 219 |
+
|
| 220 |
+
# merge keys in group_generation_colors and group_golden_colors
|
| 221 |
+
merged_color_group = list(
|
| 222 |
+
set(list(group_generation_colors.keys()) + list(group_golden_colors.keys()))
|
| 223 |
+
)
|
| 224 |
+
for color in merged_color_group:
|
| 225 |
+
if color not in group_generation_colors:
|
| 226 |
+
group_generation_colors[color] = []
|
| 227 |
+
if color not in group_golden_colors:
|
| 228 |
+
group_golden_colors[color] = []
|
| 229 |
+
|
| 230 |
+
max_set_similarity = 0
|
| 231 |
+
|
| 232 |
+
for color in merged_color_group:
|
| 233 |
+
max_set_similarity += calculate_similarity_parallel(
|
| 234 |
+
group_generation_colors[color], group_golden_colors[color]
|
| 235 |
+
)
|
| 236 |
+
|
| 237 |
+
# self.metrics["similarity"] = calculate_similarity_parallel(generation_colors, golden_colors)
|
| 238 |
+
# max_set_similarity = calculate_similarity_parallel(generation_colors, golden_colors)
|
| 239 |
+
self.metrics["precision"] = (
|
| 240 |
+
max_set_similarity / len(generation_colors)
|
| 241 |
+
if len(generation_colors) != 0
|
| 242 |
+
else 0
|
| 243 |
+
)
|
| 244 |
+
if "box" in self.golden_code_file:
|
| 245 |
+
self.metrics["recall"] = (
|
| 246 |
+
max_set_similarity / len(golden_colors)
|
| 247 |
+
if len(golden_colors) != 0
|
| 248 |
+
else 0
|
| 249 |
+
)
|
| 250 |
+
else:
|
| 251 |
+
self.metrics["recall"] = max_set_similarity / len(golden_colors)
|
| 252 |
+
if self.metrics["precision"] + self.metrics["recall"] == 0:
|
| 253 |
+
self.metrics["f1"] = 0
|
| 254 |
+
else:
|
| 255 |
+
self.metrics["f1"] = (
|
| 256 |
+
2
|
| 257 |
+
* self.metrics["precision"]
|
| 258 |
+
* self.metrics["recall"]
|
| 259 |
+
/ (self.metrics["precision"] + self.metrics["recall"])
|
| 260 |
+
)
|
| 261 |
+
|
| 262 |
+
return
|
| 263 |
+
|
| 264 |
+
def _get_prefix(self):
|
| 265 |
+
with open(
|
| 266 |
+
os.environ["VLMEVAL_CHARTMIMIC_UTILS_PATH"]
|
| 267 |
+
+ "/evaluator/color_evaluator_prefix.py",
|
| 268 |
+
"r",
|
| 269 |
+
) as f:
|
| 270 |
+
prefix = f.read()
|
| 271 |
+
return prefix
|
| 272 |
+
|
| 273 |
+
def _get_suffix(self, output_file):
    """Build the code snippet appended after the instrumented chart script.

    The returned source text deduplicates the recorded colors, refreshes them
    from ``drawed_objects`` (colors may change after draw time), filters
    overly long color lists, and writes the final list to *output_file*.
    NOTE(review): the snippet relies on names (``drawed_colors``,
    ``update_drawed_colors``, ``filter_color``) defined by the prefix code —
    confirm the prefix is always prepended before this suffix runs.
    """
    return f"""
drawed_colors = list(set(drawed_colors))
drawed_colors = update_drawed_colors(drawed_objects)
if len(drawed_colors) > 10:
    drawed_colors = filter_color(drawed_colors)
# print("drawed_colors", drawed_colors)
# print("len(drawed_colors)", len(drawed_colors))
# print("Length of drawed_obejcts", len(drawed_objects))
# print("drawed_objects", drawed_objects)
with open('{output_file}', 'w') as f:
    f.write(str(drawed_colors))
"""
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
if __name__ == "__main__":
    # Standalone driver: score every golden chart script against the model's
    # generated counterpart, fanned out over 20 worker processes.
    # sys.path.insert(0, '/home/yc21/project/Princess-s-CHI')

    evaluator = ColorEvaluator()
    # evaluator = TextEvaluator()

    # Golden scripts and generated scripts share file names; only the
    # directory differs.  Both roots come from the PROJECT_PATH env var.
    golden_code_dir = f"{os.environ['PROJECT_PATH']}/dataset/ori_500/"
    generation_code_dir = f"{os.environ['PROJECT_PATH']}/results/chart2code_Phi-3-vision-128k-instruct_DirectAgent_results/direct/"

    # list python files in the directory
    golden_code_files = [f for f in os.listdir(golden_code_dir) if f.endswith(".py")]

    # for golden_code_file in golden_code_files:
    #     print(golden_code_file)
    #     generation_code_file = generation_code_dir + golden_code_file
    #     evaluator(generation_code_file, golden_code_dir + golden_code_file)

    # write a multi-processing version
    def _muti_process_run(rank, data, num_processes):
        # Strided work split: worker `rank` handles items rank, rank+P, ...
        for i in range(rank, len(data), num_processes):
            golden_code_file = data[i]
            generation_code_file = generation_code_dir + golden_code_file
            evaluator(generation_code_file, golden_code_dir + golden_code_file)

    # NOTE(review): `evaluator` is re-created here, shadowing the instance
    # built above — the first construction appears redundant; confirm.
    evaluator = ColorEvaluator()
    processes = []
    num_processes = 20
    for rank in range(num_processes):
        p = Process(
            target=_muti_process_run, args=(rank, golden_code_files, num_processes)
        )
        p.start()
        processes.append(p)
    for p in processes:
        p.join()

    # golden_code_file = f"{os.environ['PROJECT_PATH']}/dataset/ori_500/line_5.py"
    # generation_code_file = f"{os.environ['PROJECT_PATH']}/results/chart2code_gpt-4-vision-preview_DirectAgent_results/direct/line_5.py"
    # evaluator(generation_code_file, golden_code_file)
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/color_evaluator_prefix.py
ADDED
|
@@ -0,0 +1,835 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# # flake8: noqa
|
| 2 |
+
import os
|
| 3 |
+
import squarify
|
| 4 |
+
|
| 5 |
+
import warnings
|
| 6 |
+
|
| 7 |
+
warnings.filterwarnings("ignore", category=UserWarning)
|
| 8 |
+
warnings.filterwarnings("ignore", category=DeprecationWarning)
|
| 9 |
+
warnings.filterwarnings("ignore", category=FutureWarning)
|
| 10 |
+
|
| 11 |
+
import sys
|
| 12 |
+
|
| 13 |
+
if os.environ["VLMEVAL_CHARTMIMIC_UTILS_PATH"] not in sys.path:
|
| 14 |
+
sys.path.insert(0, os.environ["VLMEVAL_CHARTMIMIC_UTILS_PATH"])
|
| 15 |
+
|
| 16 |
+
import networkx
|
| 17 |
+
import matplotlib
|
| 18 |
+
import matplotlib.pyplot as plt
|
| 19 |
+
import numpy as np
|
| 20 |
+
import networkx as nx
|
| 21 |
+
from matplotlib.axes._base import _process_plot_var_args
|
| 22 |
+
from matplotlib.axes._axes import Axes
|
| 23 |
+
from mpl_toolkits.mplot3d import Axes3D
|
| 24 |
+
import matplotlib.colors as mcolors
|
| 25 |
+
import networkx.drawing.nx_pylab as nx_pylab
|
| 26 |
+
from matplotlib.projections.polar import PolarAxes
|
| 27 |
+
from matplotlib.image import NonUniformImage
|
| 28 |
+
from matplotlib.patches import Ellipse, Circle
|
| 29 |
+
from matplotlib_venn._common import VennDiagram
|
| 30 |
+
import inspect
|
| 31 |
+
from evaluator.color_utils import filter_color
|
| 32 |
+
# from chart2code.utils.evaluator.color_utils import filter_color
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
drawed_colors = []
|
| 36 |
+
drawed_objects = {}
|
| 37 |
+
in_decorator = False
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def convert_color_to_hex(color):
    """Normalize a color (name, hex string, or RGB/RGBA sequence) to hex.

    Returns an upper-cased ``#RRGGBB``/``#RRGGBBAA`` string, or ``None`` when
    the value cannot be interpreted as a color.
    """
    try:
        if isinstance(color, str):
            # Already hex ('#RRGGBB' or '#RRGGBBAA')? Just normalize case.
            if color.startswith('#') and len(color) in (7, 9):
                return color.upper()
            # Otherwise treat it as a named color: name -> RGBA -> hex.
            return mcolors.to_hex(mcolors.to_rgba(color)).upper()
        if isinstance(color, (list, tuple, np.ndarray)) and len(color) in (3, 4):
            # RGB or RGBA component sequence.
            return mcolors.to_hex(color).upper()
        raise ValueError("Unsupported color format")
    except ValueError as e:
        print(color)
        print("Error converting color:", e)
        return None
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def log_function_specific_for_draw_networkx_labels(func):
    """Wrap ``nx.draw_networkx_labels`` so every drawn label color is logged.

    The wrapper mirrors the networkx signature, calls through, then records
    each label's color in the module-level ``drawed_colors`` and
    ``drawed_objects``.  The ``in_decorator`` flag suppresses logging for
    nested calls made internally by other patched drawing functions.
    """
    def wrapper(
        G,
        pos,
        labels=None,
        font_size=12,
        font_color="k",
        font_family="sans-serif",
        font_weight="normal",
        alpha=None,
        bbox=None,
        horizontalalignment="center",
        verticalalignment="center",
        ax=None,
        clip_on=True,
    ):
        global drawed_colors
        global in_decorator

        if not in_decorator:
            in_decorator = True

            # Entries are keyed "<source file>/<function name>--<color>" so
            # colors from different primitives stay distinguishable.
            func_name = inspect.getfile(func) + "/" + func.__name__

            result = func(
                G,
                pos,
                labels=labels,
                font_size=font_size,
                font_color=font_color,
                font_family=font_family,
                font_weight=font_weight,
                alpha=alpha,
                bbox=bbox,
                horizontalalignment=horizontalalignment,
                verticalalignment=verticalalignment,
                ax=ax,
                clip_on=clip_on
            )

            # draw_networkx_labels returns a dict of node -> Text artist.
            for item in result.values():
                color = convert_color_to_hex(item.get_color())
                drawed_colors.append(func_name + "--" + color)
                drawed_objects[func_name + "--" + color] = item

            in_decorator = False
        else:
            # Re-entrant call from another logged primitive: pass through.
            return func(
                G,
                pos,
                labels=labels,
                font_size=font_size,
                font_color=font_color,
                font_family=font_family,
                font_weight=font_weight,
                alpha=alpha,
                bbox=bbox,
                horizontalalignment=horizontalalignment,
                verticalalignment=verticalalignment,
                ax=ax,
                clip_on=clip_on
            )
        return result
    wrapper.__name__ = func.__name__
    return wrapper
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def log_function_specific_for_draw_networkx_edges(func):
    """Wrap ``nx.draw_networkx_edges`` so every drawn edge color is logged.

    Mirrors the networkx signature and records edge colors in the
    module-level ``drawed_colors`` / ``drawed_objects``.  ``in_decorator``
    suppresses logging for nested calls from other patched primitives.
    """
    def wrapper(
        G,
        pos,
        edgelist=None,
        width=1.0,
        edge_color="k",
        style="solid",
        alpha=None,
        arrowstyle=None,
        arrowsize=10,
        edge_cmap=None,
        edge_vmin=None,
        edge_vmax=None,
        ax=None,
        arrows=None,
        label=None,
        node_size=300,
        nodelist=None,
        node_shape="o",
        connectionstyle="arc3",
        min_source_margin=0,
        min_target_margin=0,
    ):
        global drawed_colors
        global in_decorator

        if not in_decorator:
            in_decorator = True

            func_name = inspect.getfile(func) + "/" + func.__name__

            result = func(
                G,
                pos,
                edgelist=edgelist,
                width=width,
                edge_color=edge_color,
                style=style,
                alpha=alpha,
                arrowstyle=arrowstyle,
                arrowsize=arrowsize,
                edge_cmap=edge_cmap,
                edge_vmin=edge_vmin,
                edge_vmax=edge_vmax,
                ax=ax,
                arrows=arrows,
                label=label,
                node_size=node_size,
                nodelist=nodelist,
                node_shape=node_shape,
                connectionstyle=connectionstyle,
                min_source_margin=min_source_margin,
                min_target_margin=min_target_margin
            )

            # draw_networkx_edges returns either a list of arrow patches
            # (directed graphs) or a single LineCollection (undirected).
            if isinstance(result, list):
                for line in result:
                    color = convert_color_to_hex(line.get_facecolor())
                    drawed_colors.append(func_name + "--" + color)
                # NOTE(review): only the last color is stored as the object
                # key here — confirm that is intentional.
                if len(result) > 0:
                    drawed_objects[func_name + "--" + color] = result
            else:
                for item in result.get_edgecolors().tolist():
                    color = convert_color_to_hex(item)
                    drawed_colors.append(func_name + "--" + color)
                if len(result.get_edgecolors().tolist()) > 0:
                    drawed_objects[func_name + "--" +
                                   color] = result  # ! Attention

            in_decorator = False
        else:
            # Re-entrant call from another logged primitive: pass through.
            return func(
                G,
                pos,
                edgelist=edgelist,
                width=width,
                edge_color=edge_color,
                style=style,
                alpha=alpha,
                arrowstyle=arrowstyle,
                arrowsize=arrowsize,
                edge_cmap=edge_cmap,
                edge_vmin=edge_vmin,
                edge_vmax=edge_vmax,
                ax=ax,
                arrows=arrows,
                label=label,
                node_size=node_size,
                nodelist=nodelist,
                node_shape=node_shape,
                connectionstyle=connectionstyle,
                min_source_margin=min_source_margin,
                min_target_margin=min_target_margin
            )
        return result
    wrapper.__name__ = func.__name__
    return wrapper
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
def log_function_specific_for_draw_networkx_nodes(func):
    """Wrap ``nx.draw_networkx_nodes`` so every drawn node color is logged.

    Mirrors the networkx signature and records node face colors in the
    module-level ``drawed_colors`` / ``drawed_objects``.  ``in_decorator``
    suppresses logging for nested calls from other patched primitives.
    """
    def wrapper(
        G,
        pos,
        nodelist=None,
        node_size=300,
        node_color="#1f78b4",
        node_shape="o",
        alpha=None,
        cmap=None,
        vmin=None,
        vmax=None,
        ax=None,
        linewidths=None,
        edgecolors=None,
        label=None,
        margins=None,
    ):
        global drawed_colors
        global in_decorator

        if not in_decorator:
            in_decorator = True

            func_name = inspect.getfile(func) + "/" + func.__name__

            result = func(
                G,
                pos,
                nodelist=nodelist,
                node_size=node_size,
                node_color=node_color,
                node_shape=node_shape,
                alpha=alpha,
                cmap=cmap,
                vmin=vmin,
                vmax=vmax,
                ax=ax,
                linewidths=linewidths,
                edgecolors=edgecolors,
                label=label,
                margins=margins
            )

            # draw_networkx_nodes returns a PathCollection; log each node's
            # face color (the collection itself is stored as the object).
            for item in result.get_facecolors().tolist():
                color = convert_color_to_hex(item)
                drawed_colors.append(func_name + "--" + color)
                drawed_objects[func_name + "--" + color] = result

            in_decorator = False
        else:
            # Re-entrant call from another logged primitive: pass through.
            return func(
                G,
                pos,
                nodelist=nodelist,
                node_size=node_size,
                node_color=node_color,
                node_shape=node_shape,
                alpha=alpha,
                cmap=cmap,
                vmin=vmin,
                vmax=vmax,
                ax=ax,
                linewidths=linewidths,
                edgecolors=edgecolors,
                label=label,
                margins=margins
            )
        return result
    wrapper.__name__ = func.__name__
    return wrapper
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
def log_function_for_3d(func):
    """Wrap an mplot3d drawing method and record the colors it draws.

    Dispatches on ``func.__name__`` (``scatter``, ``plot``, ``plot_surface``,
    ``bar3d``, ``bar``, ``add_collection3d``).  Colormap-based calls record
    the cmap *name* instead of a hex color.  Keys are suffixed ``_3d`` to
    keep them distinct from the 2D variants.  ``in_decorator`` suppresses
    logging for nested calls from other patched primitives.
    """
    def wrapper(*args, **kwargs):
        global drawed_colors
        global in_decorator

        if not in_decorator:
            in_decorator = True

            func_name = inspect.getfile(func) + "/" + func.__name__

            result = func(*args, **kwargs)

            if func.__name__ == "scatter":
                # check whether cmap is used
                if "cmap" in kwargs and kwargs["cmap"] is not None:
                    print("cmap is used", kwargs["cmap"])
                    if isinstance(kwargs["cmap"], str):
                        drawed_colors.append(
                            func_name + "_3d--" + kwargs["cmap"])
                        drawed_objects[func_name + "_3d--" +
                                       kwargs["cmap"]] = result
                    else:
                        drawed_colors.append(
                            func_name + "_3d--" + kwargs["cmap"].name)
                        drawed_objects[func_name + "_3d--" +
                                       kwargs["cmap"].name] = result
                else:
                    for item in result.get_facecolors().tolist():
                        color = convert_color_to_hex(item)
                        drawed_colors.append(func_name + "_3d--" + color)
                        drawed_objects[func_name + "_3d--" +
                                       color] = result  # ! Attention
            elif func.__name__ == "plot":
                for line in result:
                    color = convert_color_to_hex(line.get_color())
                    drawed_colors.append(func_name + "_3d--" + color)
                    drawed_objects[func_name + "_3d--" + color] = line
            elif func.__name__ == "plot_surface":
                if "cmap" in kwargs and kwargs["cmap"] is not None:
                    print("cmap is used", kwargs["cmap"])
                    if isinstance(kwargs["cmap"], str):
                        drawed_colors.append(
                            func_name + "_3d--" + kwargs["cmap"])
                        drawed_objects[func_name + "_3d--" +
                                       kwargs["cmap"]] = result
                    else:
                        drawed_colors.append(
                            func_name + "_3d--" + kwargs["cmap"].name)  # ! Attention
                        drawed_objects[func_name + "_3d--" +
                                       kwargs["cmap"].name] = result
                else:
                    # Uniform surface color: only the first face color is
                    # recorded.
                    colors = result.get_facecolors().tolist()
                    drawed_colors.append(
                        func_name +
                        "_3d--" +
                        convert_color_to_hex(
                            colors[0]))
                    # ! Attention
                    drawed_objects[func_name + "_3d--" +
                                   convert_color_to_hex(colors[0])] = result
            elif func.__name__ == "bar3d":
                # Only the first face color is recorded for the bar set.
                colors = result.get_facecolors().tolist()
                drawed_colors.append(
                    func_name +
                    "_3d--" +
                    convert_color_to_hex(
                        colors[0]))
                # ! Attention
                drawed_objects[func_name + "_3d--" +
                               convert_color_to_hex(colors[0])] = result
            elif func.__name__ == "bar":
                for item in result:
                    color = convert_color_to_hex(item.get_facecolor())
                    drawed_colors.append(func_name + "_3d--" + color)
                    drawed_objects[func_name + "_3d--" + color] = item
            elif func.__name__ == "add_collection3d":
                colors = result.get_facecolors().tolist()
                for color in colors:
                    drawed_colors.append(
                        func_name + "_3d--" + convert_color_to_hex(color))
                    drawed_objects[func_name + "_3d--" +
                                   convert_color_to_hex(color)] = result

            in_decorator = False
        else:
            # Re-entrant call from another logged primitive: pass through.
            return func(*args, **kwargs)
        return result
    wrapper.__name__ = func.__name__
    return wrapper
|
| 391 |
+
|
| 392 |
+
|
| 393 |
+
def log_function(func):
    """Generic logging decorator for 2D matplotlib drawing functions.

    Dispatches on ``func.__name__`` to extract the color(s) produced by each
    supported primitive (lines, bars, scatter, pie, spans, hlines/vlines,
    boxplot, violinplot, hist, quiver, polar plot/scatter, squarify treemaps,
    image/contour colormaps, fills, and several patch ``__init__`` hooks) and
    records them in the module-level ``drawed_colors`` / ``drawed_objects``.
    Colormap-based primitives record the cmap *name* instead of a hex color.
    ``in_decorator`` suppresses logging for nested calls made internally by
    other patched functions.
    """
    def wrapper(*args, **kwargs):
        global drawed_colors
        global in_decorator

        if not in_decorator:
            in_decorator = True

            func_name = inspect.getfile(func) + "/" + func.__name__

            result = func(*args, **kwargs)

            if func.__name__ == "_makeline":
                # Axes.plot internals: result is (line, kwargs-with-color).
                color = convert_color_to_hex(result[1]["color"])
                drawed_colors.append(func_name + "--" + color)
                drawed_objects[func_name + "--" + color] = result[0]
            elif func.__name__ == "axhline":
                color = convert_color_to_hex(result.get_color())
                drawed_colors.append(func_name + "--" + color)
                drawed_objects[func_name + "--" + color] = result
            elif func.__name__ == "axvline":
                color = convert_color_to_hex(result.get_color())
                drawed_colors.append(func_name + "--" + color)
                drawed_objects[func_name + "--" + color] = result
            elif func.__name__ == "_fill_between_x_or_y":
                # fill_between / fill_betweenx internals.
                color = convert_color_to_hex(list(result.get_facecolors()[0]))
                drawed_colors.append(func_name + "--" + color)
                drawed_objects[func_name + "--" + color] = result
            elif func.__name__ == "bar":
                for item in result:
                    # _original_facecolor survives later style mutations.
                    color = convert_color_to_hex(
                        list(item._original_facecolor))
                    drawed_colors.append(func_name + "--" + color)
                    drawed_objects[func_name + "--" + color] = item
            elif func.__name__ == "scatter" and not isinstance(args[0], PolarAxes):
                # check whether cmap is used
                if "cmap" in kwargs and kwargs["cmap"] is not None:
                    print("cmap is used", kwargs["cmap"])
                    if isinstance(kwargs["cmap"], str):
                        drawed_colors.append(func_name + "--" + kwargs["cmap"])
                        drawed_objects[func_name + "--" +
                                       kwargs["cmap"]] = result
                    else:
                        drawed_colors.append(
                            func_name + "--" + kwargs["cmap"].name)  # ! Attention
                        drawed_objects[func_name + "--" +
                                       kwargs["cmap"].name] = result
                else:
                    if len(result.get_facecolor()) != 0:
                        color = convert_color_to_hex(
                            list(result.get_facecolor()[0]))
                        drawed_colors.append(func_name + "--" + color)
                        drawed_objects[func_name + "--" + color] = result
            elif func.__name__ == "pie":
                # result[0] is the list of wedge patches.
                for item in result[0]:
                    color = convert_color_to_hex(item.get_facecolor())
                    drawed_colors.append(func_name + "--" + color)
                    drawed_objects[func_name + "--" + color] = item
            elif func.__name__ == "axvspan":
                color = convert_color_to_hex(result.get_facecolor())
                drawed_colors.append(func_name + "--" + color)
                drawed_objects[func_name + "--" + color] = result
            elif func.__name__ == "axhspan":
                color = convert_color_to_hex(result.get_facecolor())
                drawed_colors.append(func_name + "--" + color)
                drawed_objects[func_name + "--" + color] = result
            elif func.__name__ == "hlines":
                for item in result.get_edgecolors():
                    color = convert_color_to_hex(list(item))
                    drawed_colors.append(func_name + "--" + color)
                    drawed_objects[func_name + "--" +
                                   color] = result  # ! Attention
            elif func.__name__ == "vlines":
                for item in result.get_edgecolors():
                    color = convert_color_to_hex(list(item))
                    drawed_colors.append(func_name + "--" + color)
                    drawed_objects[func_name + "--" +
                                   color] = result  # ! Attention
            elif func.__name__ == "boxplot":
                # Boxes are PathPatch only when patch_artist=True; plain
                # Line2D boxes are skipped.
                for item in result["boxes"]:
                    if isinstance(item, matplotlib.patches.PathPatch):
                        color = convert_color_to_hex(
                            list(item.get_facecolor()))
                        drawed_colors.append(func_name + "--" + color)
                        drawed_objects[func_name + "--" +
                                       color] = item  # ! Attention
            elif func.__name__ == "violinplot":
                for item in result["bodies"]:
                    color = convert_color_to_hex(list(item.get_facecolor()[0]))
                    drawed_colors.append(func_name + "--" + color)
                    drawed_objects[func_name + "--" +
                                   color] = item  # ! Attention
            elif func.__name__ == "hist":
                tops, bins, patches = result
                # Multi-dataset hist returns a silent_list of containers.
                if not isinstance(patches, matplotlib.cbook.silent_list):
                    for item in patches:
                        color = convert_color_to_hex(
                            list(item.get_facecolor()))
                        drawed_colors.append(func_name + "--" + color)
                        drawed_objects[func_name + "--" + color] = item
                else:
                    for container in patches:
                        for item in container:
                            color = convert_color_to_hex(
                                list(item.get_facecolor()))
                            drawed_colors.append(func_name + "--" + color)
                            drawed_objects[func_name + "--" + color] = item
            elif func.__name__ == "quiver":
                for item in result.get_facecolors().tolist():
                    color = convert_color_to_hex(item)
                    drawed_colors.append(func_name + "--" + color)
                    drawed_objects[func_name + "--" +
                                   color] = result  # ! Attention
            elif func.__name__ == "plot" and len(args) > 0 and isinstance(args[0], PolarAxes):
                lines = result
                for line in lines:
                    color = convert_color_to_hex(line.get_color())
                    # print("color", color)
                    drawed_colors.append(func_name + "_polar" + "--" + color)
                    drawed_objects[func_name + "_polar" + "--" + color] = line
            elif func.__name__ == "scatter" and isinstance(args[0], PolarAxes):
                # check whether cmap is used
                # NOTE(review): unlike the polar "plot" branch above, this
                # branch indexes args[0] without a len(args) guard — confirm
                # scatter is always called with the axes as first positional.
                if "cmap" in kwargs and kwargs["cmap"] is not None:
                    print("cmap is used", kwargs["cmap"])
                    if isinstance(kwargs["cmap"], str):
                        drawed_colors.append(
                            func_name + "_polar" + "--" + kwargs["cmap"])
                        drawed_objects[func_name +
                                       "_polar--" + kwargs["cmap"]] = result
                    else:
                        drawed_colors.append(
                            func_name + "_polar" + "--" + kwargs["cmap"].name)
                        drawed_objects[func_name + "_polar" +
                                       "--" + kwargs["cmap"].name] = result
                else:
                    if len(result.get_facecolor()) != 0:
                        color = convert_color_to_hex(
                            list(result.get_facecolor()[0]))
                        drawed_colors.append(
                            func_name + "_polar" + "--" + color)
                        drawed_objects[func_name + "_polar" +
                                       "--" + color] = result  # ! Attention
            elif func.__name__ == "plot" and "squarify" in func_name:
                # get ax
                ax = result
                # get container
                containers = ax.containers
                for container in containers:
                    for item in container:
                        color = convert_color_to_hex(
                            list(item.get_facecolor()))
                        drawed_colors.append(
                            func_name + "_squarify" + "--" + color)
                        drawed_objects[func_name +
                                       "_squarify" + "--" + color] = item
            elif func.__name__ == "imshow":
                colormap = result.get_cmap().name
                drawed_colors.append(func_name + "--" + colormap)
                drawed_objects[func_name + "--" +
                               colormap] = result  # ! Attention
            elif func.__name__ == "pcolor":
                colormap = result.get_cmap().name
                drawed_colors.append(func_name + "--" + colormap)
                drawed_objects[func_name + "--" +
                               colormap] = result  # ! Attention
            elif func.__name__ == "contour":
                colormap = result.get_cmap().name
                drawed_colors.append(func_name + "--" + colormap)
                drawed_objects[func_name + "--" +
                               colormap] = result  # ! Attention
            elif func.__name__ == "contourf":
                colormap = result.get_cmap().name
                drawed_colors.append(func_name + "--" + colormap)
                drawed_objects[func_name + "--" +
                               colormap] = result  # ! Attention
            elif func.__name__ == "fill":
                patches = result
                for patch in patches:
                    color = convert_color_to_hex(list(patch.get_facecolor()))
                    drawed_colors.append(func_name + "--" + color)
                    drawed_objects[func_name + "--" + color] = patch
            elif func.__name__ == "__init__" and isinstance(args[0], NonUniformImage):
                colormap = args[0].get_cmap().name
                drawed_colors.append(func_name + "--" + colormap)
                drawed_objects[func_name + "--" + colormap] = args[0]
            elif func.__name__ == "broken_barh":
                colors = result.get_facecolors().tolist()
                for color in colors:
                    drawed_colors.append(
                        func_name + "--" + convert_color_to_hex(color))
                    drawed_objects[func_name + "--" +
                                   convert_color_to_hex(color)] = result
            elif func.__name__ == "__init__" and isinstance(args[0], Ellipse):
                color = convert_color_to_hex(args[0].get_facecolor())
                drawed_colors.append(func_name + "--" + color)
                drawed_objects[func_name + "--" + color] = args[0]
            elif func.__name__ == "tripcolor":
                colormap = result.get_cmap().name
                drawed_colors.append(func_name + "--" + colormap)
                drawed_objects[func_name + "--" +
                               colormap] = result  # ! Attention
            elif func.__name__ == "__init__" and isinstance(args[0], VennDiagram):
                for item in args[0].patches:
                    color = convert_color_to_hex(item.get_facecolor())
                    drawed_colors.append(func_name + "--" + color)
                    drawed_objects[func_name + "--" + color] = args[0]
            elif func.__name__ == "__init__" and isinstance(args[0], Circle):
                color = convert_color_to_hex(args[0].get_facecolor())
                drawed_colors.append(func_name + "--" + color)
                drawed_objects[func_name + "--" + color] = args[0]
            in_decorator = False
        else:
            # Re-entrant call from another logged primitive: pass through.
            return func(*args, **kwargs)
        return result

    wrapper.__name__ = func.__name__
    return wrapper
|
| 610 |
+
|
| 611 |
+
|
| 612 |
+
def update_drawed_colors(drawed_obejcts):
    """Re-derive the final color of every logged artist.

    Colors recorded at draw time can be changed afterwards (e.g. by style
    sheets or later property sets), so this walks the ``"func_name--color"``
    -> artist mapping produced by the logging wrappers and re-reads each
    artist's current color/colormap.

    Args:
        drawed_obejcts: mapping of ``"func_name--color"`` keys to the logged
            matplotlib artists.  (Parameter name keeps the original spelling
            for backward compatibility with keyword callers.)

    Returns:
        De-duplicated list of ``"func_name--color"`` strings; 3D/polar
        entries get a ``_3d`` / ``_polar`` suffix on the function name.
    """
    drawed_colors = []
    # BUGFIX: the original body iterated the module-global `drawed_objects`,
    # silently ignoring the argument. Iterate the passed mapping instead.
    for name, obj in drawed_obejcts.items():
        func_name = name.split("--")[0]
        color = name.split("--")[1]

        if "/_makeline" in func_name:
            color = convert_color_to_hex(obj.get_color())
            drawed_colors.append(func_name + "--" + color)
        elif "/axhline" in func_name:
            color = convert_color_to_hex(obj.get_color())
            drawed_colors.append(func_name + "--" + color)
        elif "/axvline" in func_name:
            color = convert_color_to_hex(obj.get_color())
            drawed_colors.append(func_name + "--" + color)
        elif "/_fill_between_x_or_y" in func_name:
            color = convert_color_to_hex(list(obj.get_facecolors()[0]))
            drawed_colors.append(func_name + "--" + color)
        elif "/bar" in func_name and "_3d" not in func_name:
            color = convert_color_to_hex(list(obj._original_facecolor))
            if color is not None:
                drawed_colors.append(func_name + "--" + color)
        elif "/scatter" in func_name and "polar" not in func_name and "3d" not in func_name:
            # check whether cmap is used by checking whether color is hex
            if color.startswith("#") is False:
                drawed_colors.append(func_name + "--" + color)
            else:
                if len(obj.get_facecolor()) != 0:
                    color = convert_color_to_hex(list(obj.get_facecolor()[0]))
                    drawed_colors.append(func_name + "--" + color)
        elif "/pie" in func_name:
            color = convert_color_to_hex(obj.get_facecolor())
            drawed_colors.append(func_name + "--" + color)
        elif "/axvspan" in func_name:
            color = convert_color_to_hex(obj.get_facecolor())
            drawed_colors.append(func_name + "--" + color)
        elif "/axhspan" in func_name:
            color = convert_color_to_hex(obj.get_facecolor())
            drawed_colors.append(func_name + "--" + color)
        elif "/hlines" in func_name:
            for item in obj.get_edgecolors():
                color = convert_color_to_hex(list(item))
                drawed_colors.append(func_name + "--" + color)
        elif "/vlines" in func_name:
            for item in obj.get_edgecolors():
                color = convert_color_to_hex(list(item))
                drawed_colors.append(func_name + "--" + color)
        elif "/boxplot" in func_name:
            color = convert_color_to_hex(list(obj.get_facecolor()))
            drawed_colors.append(func_name + "--" + color)
        elif "/violinplot" in func_name:
            color = convert_color_to_hex(list(obj.get_facecolor()[0]))
            drawed_colors.append(func_name + "--" + color)
        elif "/hist" in func_name:
            color = convert_color_to_hex(list(obj.get_facecolor()))
            drawed_colors.append(func_name + "--" + color)
        elif "/quiver" in func_name:
            for item in obj.get_facecolors().tolist():
                color = convert_color_to_hex(item)
                drawed_colors.append(func_name + "--" + color)
        elif "/plot" in func_name and "polar" in func_name:
            color = convert_color_to_hex(obj.get_color())
            drawed_colors.append(func_name + "_polar--" + color)
        elif "/scatter" in func_name and "polar" in func_name:
            # check whether cmap is used by checking whether color is hex
            if color.startswith("#") is False:
                drawed_colors.append(func_name + "_polar--" + color)
            else:
                if len(obj.get_facecolor()) != 0:
                    color = convert_color_to_hex(list(obj.get_facecolor()[0]))
                    drawed_colors.append(func_name + "_polar--" + color)
        elif "/plot" in func_name and "_squarify" in func_name:
            color = convert_color_to_hex(list(obj.get_facecolor()))
            drawed_colors.append(func_name + "--" + color)
        elif "/imshow" in func_name:
            colormap = obj.get_cmap().name
            drawed_colors.append(func_name + "--" + colormap)
        elif "/pcolor" in func_name:
            colormap = obj.get_cmap().name
            drawed_colors.append(func_name + "--" + colormap)
        elif "/contour" in func_name:
            # NOTE(review): "/contour" also matches "/contourf" keys, so the
            # "/contourf" branch below is unreachable; both do the same thing,
            # so behavior is unaffected.
            colormap = obj.get_cmap().name
            drawed_colors.append(func_name + "--" + colormap)
        elif "/contourf" in func_name:
            colormap = obj.get_cmap().name
            drawed_colors.append(func_name + "--" + colormap)
        elif "/fill" in func_name:
            color = convert_color_to_hex(list(obj.get_facecolor()))
            drawed_colors.append(func_name + "--" + color)
        elif "/__init__" in func_name and isinstance(obj, NonUniformImage):
            colormap = obj.get_cmap().name
            drawed_colors.append(func_name + "--" + colormap)
        elif "/broken_barh" in func_name:
            colors = obj.get_facecolors().tolist()
            for color in colors:
                drawed_colors.append(
                    func_name + "--" + convert_color_to_hex(color))
        elif "/__init__" in func_name and isinstance(obj, Ellipse):
            color = convert_color_to_hex(obj.get_facecolor())
            drawed_colors.append(func_name + "--" + color)
        elif "/tripcolor" in func_name:
            colormap = obj.get_cmap().name
            drawed_colors.append(func_name + "--" + colormap)
        elif "/__init__" in func_name and isinstance(obj, VennDiagram):
            for item in obj.patches:
                color = convert_color_to_hex(item.get_facecolor())
                drawed_colors.append(func_name + "--" + color)
        elif "/__init__" in func_name and isinstance(obj, Circle):
            color = convert_color_to_hex(obj.get_facecolor())
            drawed_colors.append(func_name + "--" + color)
        elif "/scatter" in func_name and "3d" in func_name:
            # check whether cmap is used by checking whether color is hex
            if color.startswith("#") is False:
                drawed_colors.append(func_name + "_3d--" + color)
            else:
                for item in obj.get_facecolors().tolist():
                    color = convert_color_to_hex(item)
                    drawed_colors.append(func_name + "_3d--" + color)
        elif "/plot" in func_name and "3d" in func_name and "plot_surface" not in func_name:
            color = convert_color_to_hex(obj.get_color())
            drawed_colors.append(func_name + "_3d--" + color)
        elif "/plot_surface" in func_name:
            if color.startswith("#") is False:
                drawed_colors.append(func_name + "_3d--" + color)
            else:
                colors = obj.get_facecolors().tolist()
                drawed_colors.append(
                    func_name +
                    "_3d--" +
                    convert_color_to_hex(
                        colors[0]))
        elif "/bar3d" in func_name:
            colors = obj.get_facecolors().tolist()
            drawed_colors.append(
                func_name +
                "_3d--" +
                convert_color_to_hex(
                    colors[0]))
        elif "/bar" in func_name and "3d" in func_name:
            color = convert_color_to_hex(obj.get_facecolor())
            drawed_colors.append(func_name + "_3d--" + color)
        elif "/add_collection3d" in func_name:
            colors = obj.get_facecolors().tolist()
            for color in colors:
                drawed_colors.append(
                    func_name + "_3d--" + convert_color_to_hex(color))
        elif "/draw_networkx_labels" in func_name:
            color = convert_color_to_hex(obj.get_color())
            drawed_colors.append(func_name + "--" + color)
        elif "/draw_networkx_edges" in func_name:
            if isinstance(obj, list):
                for line in obj:
                    color = convert_color_to_hex(line.get_facecolor())
                    drawed_colors.append(func_name + "--" + color)
            else:
                for item in obj.get_edgecolors().tolist():
                    color = convert_color_to_hex(item)
                    drawed_colors.append(func_name + "--" + color)
        elif "/draw_networkx_nodes" in func_name:
            for item in obj.get_facecolors().tolist():
                color = convert_color_to_hex(item)
                drawed_colors.append(func_name + "--" + color)

    # De-duplicate; ordering is not part of the contract.
    drawed_colors = list(set(drawed_colors))

    return drawed_colors
|
| 779 |
+
|
| 780 |
+
# ---------------------------------------------------------------------------
# Wire the logging wrappers defined above into matplotlib / networkx /
# squarify, so every subsequent draw call records the colors it used.
# NOTE(review): these assignments mutate shared library classes at import
# time — importing this module has global side effects on matplotlib.
# ---------------------------------------------------------------------------
_process_plot_var_args._makeline = log_function(
    _process_plot_var_args._makeline)
Axes.bar = log_function(Axes.bar)
Axes.scatter = log_function(Axes.scatter)
Axes.axhline = log_function(Axes.axhline)
Axes.axvline = log_function(Axes.axvline)
Axes._fill_between_x_or_y = log_function(Axes._fill_between_x_or_y)
Axes.pie = log_function(Axes.pie)
Axes.axvspan = log_function(Axes.axvspan)
Axes.axhspan = log_function(Axes.axhspan)
Axes.hlines = log_function(Axes.hlines)
Axes.vlines = log_function(Axes.vlines)
Axes.boxplot = log_function(Axes.boxplot)
Axes.violinplot = log_function(Axes.violinplot)
Axes.hist = log_function(Axes.hist)
# Axes.plot is intentionally not wrapped; lines are captured via
# _process_plot_var_args._makeline above.
# Axes.plot = log_function(Axes.plot)
PolarAxes.plot = log_function(PolarAxes.plot)
Axes.quiver = log_function(Axes.quiver)
Axes.imshow = log_function(Axes.imshow)
Axes.pcolor = log_function(Axes.pcolor)
Axes.contour = log_function(Axes.contour)
Axes.contourf = log_function(Axes.contourf)
Axes.fill = log_function(Axes.fill)
NonUniformImage.__init__ = log_function(NonUniformImage.__init__)
Ellipse.__init__ = log_function(Ellipse.__init__)
Axes.broken_barh = log_function(Axes.broken_barh)

# networkx exposes the draw_networkx_* functions both from nx_pylab and from
# the package top level, so both bindings must be patched.
nx_pylab.draw_networkx_nodes = log_function_specific_for_draw_networkx_nodes(
    nx_pylab.draw_networkx_nodes)
nx_pylab.draw_networkx_edges = log_function_specific_for_draw_networkx_edges(
    nx_pylab.draw_networkx_edges)
nx_pylab.draw_networkx_labels = log_function_specific_for_draw_networkx_labels(
    nx_pylab.draw_networkx_labels)

nx.draw_networkx_nodes = log_function_specific_for_draw_networkx_nodes(
    nx.draw_networkx_nodes)
nx.draw_networkx_edges = log_function_specific_for_draw_networkx_edges(
    nx.draw_networkx_edges)
nx.draw_networkx_labels = log_function_specific_for_draw_networkx_labels(
    nx.draw_networkx_labels)


squarify.plot = log_function(squarify.plot)

# 3D axes use a dedicated wrapper so logged keys get the "_3d" suffix.
Axes3D.scatter = log_function_for_3d(Axes3D.scatter)
Axes3D.plot = log_function_for_3d(Axes3D.plot)
Axes3D.plot_surface = log_function_for_3d(Axes3D.plot_surface)
Axes3D.bar3d = log_function_for_3d(Axes3D.bar3d)
Axes3D.bar = log_function_for_3d(Axes3D.bar)
Axes3D.add_collection3d = log_function_for_3d(Axes3D.add_collection3d)

Axes.tripcolor = log_function(Axes.tripcolor)

VennDiagram.__init__ = log_function(VennDiagram.__init__)

Circle.__init__ = log_function(Circle.__init__)
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/color_utils.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
# This is a patch for color map, which is not updated for newer version of
|
| 4 |
+
# numpy
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def patch_asscalar(a):
    """Drop-in replacement for ``np.asscalar``, removed in NumPy >= 1.23.

    Delegates to the array scalar's ``.item()`` accessor, which is the
    officially recommended substitute.
    """
    return a.item()


# Restore the removed name so downstream libraries (e.g. colormath) that
# still call np.asscalar keep working.
setattr(np, "asscalar", patch_asscalar)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def hex_to_rgb(hex_color):
    """Convert a ``#RRGGBB`` (or bare ``RRGGBB``) string to an (r, g, b) tuple.

    Each component is returned as an int in [0, 255].
    """
    digits = hex_color.lstrip('#')
    return tuple(int(digits[pos:pos + 2], 16) for pos in (0, 2, 4))
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def rgb_to_lab(rgb):
    """
    Convert an RGB color to Lab color space.
    RGB values should be in the range [0, 255].
    """
    # Imported lazily so merely importing this module does not require
    # colormath to be installed.
    from colormath.color_objects import sRGBColor, LabColor
    from colormath.color_conversions import convert_color

    # is_upscaled=True tells colormath the components are 0-255, not 0-1.
    srgb = sRGBColor(rgb[0], rgb[1], rgb[2], is_upscaled=True)
    return convert_color(srgb, LabColor)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def calculate_similarity_single(c1, c2):
    """Similarity in [0, 1] between two color strings.

    Two hex colors ("#RRGGBB") are compared perceptually via CIEDE2000 in
    Lab space; two non-hex strings (colormap names) match exactly or not at
    all; a hex/non-hex pair is always 0.
    """
    c1_is_hex = c1.startswith("#")
    c2_is_hex = c2.startswith("#")

    if c1_is_hex and c2_is_hex:
        lab1 = rgb_to_lab(hex_to_rgb(c1))
        lab2 = rgb_to_lab(hex_to_rgb(c2))
        # Imported lazily; CIEDE2000 distances are roughly 0-100, so the
        # result is normalized and clamped into [0, 1].
        from colormath.color_diff import delta_e_cie2000
        return max(0, 1 - (delta_e_cie2000(lab1, lab2) / 100))

    if not c1_is_hex and not c2_is_hex:
        return 1 if c1 == c2 else 0

    return 0
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def filter_color(color_list):
    """Greedily drop near-duplicate colors from a list of "name--color" entries.

    The first entry is always kept; each later entry is kept only if its
    color part is not more than 0.7 similar to any already-kept entry's
    color part. Input order is preserved among the survivors.
    """
    kept = []
    for idx, entry in enumerate(color_list):
        if idx == 0:
            # Seed the result with the first entry unconditionally.
            kept.append(entry)
            continue
        is_near_duplicate = any(
            calculate_similarity_single(
                entry.split("--")[1], other.split("--")[1]) > 0.7
            for other in kept
        )
        if not is_near_duplicate:
            kept.append(entry)
    # print("Filtered color list: ", kept)
    return kept
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def group_color(color_list):
    """Group "chart_type--color" strings into {chart_type: [color, ...]}.

    Colors keep their original order within each chart type's list.
    """
    grouped = {}
    for entry in color_list:
        parts = entry.split("--")
        chart_type, value = parts[0], parts[1]
        grouped.setdefault(chart_type, []).append(value)
    return grouped
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/grid_evaluator.py
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa
|
| 2 |
+
from typing import List, Tuple
|
| 3 |
+
# from dotenv import load_dotenv
|
| 4 |
+
# load_dotenv()
|
| 5 |
+
|
| 6 |
+
import os
|
| 7 |
+
# sys.path.insert(0, os.environ["PROJECT_PATH"])
|
| 8 |
+
|
| 9 |
+
from ..eval_configs.global_config import run_script_safe
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class GridEvaluator:
    """Compare grid-line visibility between generated and golden chart code.

    Each script is re-executed with an injected suffix that records, per
    subplot, whether x/y grid lines are visible; precision/recall/F1 over
    those records land in ``self.metrics``.
    """

    def __init__(self) -> None:
        # Populated by __call__ via _calculate_metrics.
        self.metrics = {
            "precision": 0,
            "recall": 0,
            "f1": 0
        }

    def __call__(self, generation_code_file, golden_code_file):
        """Run both scripts and score generation grids against golden grids."""
        generation_grids = self._log_legends(generation_code_file)
        golden_grids = self._log_legends(golden_code_file)

        self._calculate_metrics(generation_grids, golden_grids)

        # redunant_file = os.environ["PROJECT_PATH"] + "/" + os.path.basename(golden_code_file).replace(".py", ".pdf")
        # os.remove(redunant_file)
        # print(self.metrics)

    def _log_legends(self, code_file):
        """
        Get legend objects of the code

        NOTE(review): despite the name (copied from the legend evaluator),
        this actually records per-subplot grid visibility.  Writes and runs
        an instrumented copy of ``code_file``, then reads back the recorded
        list from a sibling ``.txt`` file (which is removed afterwards).
        """

        with open(code_file, 'r') as f:
            lines = f.readlines()
            code = ''.join(lines)

        prefix = self._get_prefix()
        output_file = code_file.replace(".py", ".txt")
        suffix = self._get_suffix(output_file)
        code = prefix + code + suffix

        code_log_texts_file = code_file.replace(".py", "_log_legends.py")
        with open(code_log_texts_file, 'w') as f:
            f.write(code)

        # os.system(f"python3 {code_log_texts_file}")
        success = run_script_safe(code_log_texts_file)
        if not success:
            print("Skip downstream logic due to previous failure.")
            # optionally return default result or continue
            # NOTE(review): execution falls through regardless, so the
            # open() below will raise if the output file was never written.

        with open(output_file, 'r') as f:
            texts = f.read()
            # eval() on generated-by-us content; unsafe on untrusted files.
            texts = eval(texts)

        os.remove(code_log_texts_file)
        os.remove(output_file)

        # pdf_file = re.findall(r"plt\.savefig\('(.*)'\)", code)
        # if len(pdf_file) != 0:
        #     pdf_file = pdf_file[0]
        #     if os.path.basename(pdf_file) == pdf_file:
        #         os.remove(pdf_file)

        return texts

    def _calculate_metrics(
            self,
            generation_grids: List[Tuple],
            golden_grids: List[Tuple]):
        """
        Calculate the metrics

        Args:
            - generation_grids: List of tuples of texts, [(x, y, x_rel, y_rel, text), ...]
            - golden_grids: List of tuples of texts, [(x, y, x_rel, y_rel, text), ...]

        Results are stored in self.metrics; nothing is returned.
        NOTE(review): matched entries are removed from generation_grids,
        mutating the caller's list.
        """
        if len(generation_grids) == 0 or len(golden_grids) == 0:
            self.metrics["precision"] = 0
            self.metrics["recall"] = 0
            self.metrics["f1"] = 0
            return

        # Lengths captured before the destructive matching loop below.
        len_generation = len(generation_grids)
        len_golden = len(golden_grids)

        n_correct = 0
        for t in golden_grids:
            if t in generation_grids:
                n_correct += 1
                generation_grids.remove(t)

        self.metrics["precision"] = n_correct / len_generation
        self.metrics["recall"] = n_correct / len_golden
        if self.metrics["precision"] + self.metrics["recall"] == 0:
            self.metrics["f1"] = 0
        else:
            self.metrics["f1"] = 2 * self.metrics["precision"] * \
                self.metrics["recall"] / (self.metrics["precision"] + self.metrics["recall"])

        return

    def _get_prefix(self):
        """Code prepended to the script under test: silence warnings, make the
        chartmimic utils importable, and reset the shared text log."""
        sys_to_add = os.environ["VLMEVAL_CHARTMIMIC_UTILS_PATH"]
        # assert sys_to_add not empty
        assert sys_to_add != "", "VLMEVAL_CHARTMIMIC_UTILS_PATH is not set"
        # NOTE(review): the injected code references sys.path but does not
        # import sys itself — it relies on the script under test importing it.
        return f"""
import warnings
warnings.filterwarnings("ignore", category=UserWarning)

if "{sys_to_add}" not in sys.path:
    sys.path.insert(0, "{sys_to_add}")

import eval_configs.global_config as global_config
global_config.reset_texts()
from matplotlib.backends.backend_pdf import RendererPdf

grid_visibility = []
"""

    def _get_suffix(self, output_file):
        """Code appended to the script under test: record x/y grid visibility
        for every subplot (merging twin axes with OR) into ``output_file``."""
        return f"""

all_axes = plt.gcf().get_axes()

for ax in all_axes:
    subplot_spec = ax.get_subplotspec()
    row = subplot_spec.rowspan.start
    col = subplot_spec.colspan.start
    x_grid_visible = any(line.get_visible() for line in ax.get_xgridlines())
    y_grid_visible = any(line.get_visible() for line in ax.get_ygridlines())

    grid_visibility.append(
        dict(
            row=row,
            col=col,
            x_grid_visible=x_grid_visible,
            y_grid_visible=y_grid_visible
        )
    )

# sort the grid visibility by row and col
grid_visibility = sorted(grid_visibility, key=lambda x: (x['row'], x['col']))

# Since there can be twin axes, we need to merge the grid visibility, if they are in the same row and col, use "or" to merge
grid_visibility_merged = []
for i, grid in enumerate(grid_visibility):
    if i == 0:
        grid_visibility_merged.append(grid)
        continue

    last_grid = grid_visibility_merged[-1]
    if last_grid['row'] == grid['row'] and last_grid['col'] == grid['col']:
        last_grid['x_grid_visible'] = last_grid['x_grid_visible'] or grid['x_grid_visible']
        last_grid['y_grid_visible'] = last_grid['y_grid_visible'] or grid['y_grid_visible']
    else:
        grid_visibility_merged.append(grid)

grid_visibility = grid_visibility_merged

# print(grid_visibility)
with open('{output_file}', 'w') as f:
    f.write(str(grid_visibility))
"""
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
if __name__ == "__main__":
    # sys.path.insert(0, '/home/yc21/project/Princess-s-CHI')

    # Ad-hoc developer smoke test over hard-coded local paths; not part of
    # the evaluation pipeline.
    evaluator = GridEvaluator()

    for idx in range(1, 40):
        print(f"Processing {idx}")
        generation_code_file = f"/home/yc21/project/Princess-s-CHI/dataset/line/line_{idx}.py"
        golden_code_file = f"/home/yc21/project/Princess-s-CHI/results/chart2code_gpt_DirectAgent_results/direct/line_{idx}.py"
        evaluator(generation_code_file, golden_code_file)
        print()
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/layout_evaluator.py
ADDED
|
@@ -0,0 +1,166 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa
|
| 2 |
+
from typing import List, Tuple
|
| 3 |
+
# from dotenv import load_dotenv
|
| 4 |
+
# load_dotenv()
|
| 5 |
+
|
| 6 |
+
import os
|
| 7 |
+
# sys.path.insert(0, os.environ["PROJECT_PATH"])
|
| 8 |
+
|
| 9 |
+
from ..eval_configs.global_config import run_script_safe
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class LayoutEvaluator:
    """Compare subplot grid layouts between generated and golden chart code.

    Each script is re-executed with an injected suffix that records every
    axes' gridspec geometry; precision/recall/F1 over those records land in
    ``self.metrics``.
    """

    def __init__(self) -> None:
        # Populated by __call__ via _calculate_metrics.
        self.metrics = {
            "precision": 0,
            "recall": 0,
            "f1": 0
        }

    def __call__(self, generation_code_file, golden_code_file):
        """Run both scripts and score generation layouts against golden ones."""
        generation_layouts = self._log_layouts(generation_code_file)
        golden_layouts = self._log_layouts(golden_code_file)

        self._calculate_metrics(generation_layouts, golden_layouts)

        # redunant_file = os.environ["PROJECT_PATH"] + "/" + os.path.basename(golden_code_file).replace(".py", ".pdf")
        # os.remove(redunant_file)

        # print(self.metrics)

    def _log_layouts(self, code_file):
        """
        Get objects of the code

        Writes and runs an instrumented copy of ``code_file``; reads the
        recorded layout list back from a sibling ``_log_layouts.txt`` file.
        Returns [] when the instrumented run produced no output file.
        """

        with open(code_file, 'r') as f:
            lines = f.readlines()
            code = ''.join(lines)

        prefix = self._get_prefix()
        output_file = code_file.replace(".py", "_log_layouts.txt")
        # Graph charts have no gridspec, so they get a degenerate 1x1 layout.
        if "/graph" in code_file:
            suffix = self._get_suffix_special_for_graph(output_file)
        else:
            suffix = self._get_suffix(output_file)

        code = prefix + code + suffix

        code_log_texts_file = code_file.replace(".py", "_log_layouts.py")
        with open(code_log_texts_file, 'w') as f:
            f.write(code)

        # os.system(f"python3 {code_log_texts_file}")
        success = run_script_safe(code_log_texts_file)
        if not success:
            print("Skip downstream logic due to previous failure.")
            # optionally return default result or continue

        if os.path.exists(output_file):
            with open(output_file, 'r') as f:
                texts = f.read()
                # eval() on generated-by-us content; unsafe on untrusted files.
                texts = eval(texts)
            os.remove(output_file)
        else:
            texts = []
        os.remove(code_log_texts_file)

        return texts

    def _calculate_metrics(
            self,
            generation_layouts: List[Tuple],
            golden_layouts: List[Tuple]):
        """
        Calculate the metrics

        Args:
            - generation_layouts: List of tuples of texts, [(x, y, x_rel, y_rel, text), ...]
            - golden_layouts: List of tuples of texts, [(x, y, x_rel, y_rel, text), ...]

        Results are stored in self.metrics; nothing is returned.
        NOTE(review): matched entries are removed from generation_layouts,
        mutating the caller's list.
        """
        if len(generation_layouts) == 0 or len(golden_layouts) == 0:
            self.metrics["precision"] = 0
            self.metrics["recall"] = 0
            self.metrics["f1"] = 0
            return

        # Lengths captured before the destructive matching loop below.
        len_generation = len(generation_layouts)
        len_golden = len(golden_layouts)

        n_correct = 0
        for t in golden_layouts:
            if t in generation_layouts:
                n_correct += 1
                generation_layouts.remove(t)

        self.metrics["precision"] = n_correct / len_generation
        self.metrics["recall"] = n_correct / len_golden
        if self.metrics["precision"] + self.metrics["recall"] == 0:
            self.metrics["f1"] = 0
        else:
            self.metrics["f1"] = 2 * self.metrics["precision"] * \
                self.metrics["recall"] / (self.metrics["precision"] + self.metrics["recall"])

        return

    def _get_prefix(self):
        """Code prepended to the script under test: silence noisy warnings."""
        return """
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)

"""

    def _get_suffix(self, output_file):
        """Code appended to the script under test: dump every axes' gridspec
        geometry (grid shape plus row/col span) into ``output_file``."""
        return f"""

def get_gridspec_layout_info(fig):
    layout_info = {{}}
    for ax in fig.axes:
        spec = ax.get_subplotspec()
        if spec is None:
            continue
        gs = spec.get_gridspec()
        nrows, ncols = gs.get_geometry()
        row_start, row_end = spec.rowspan.start, spec.rowspan.stop - 1  # Zero-based and inclusive
        col_start, col_end = spec.colspan.start, spec.colspan.stop - 1  # Zero-based and inclusive
        layout_info[ax] = dict(nrows=nrows, ncols=ncols, row_start=row_start, row_end=row_end, col_start=col_start, col_end=col_end)
    # print(layout_info)
    layout_info = list(layout_info.values())
    return layout_info

layout_info = get_gridspec_layout_info(fig=plt.gcf())
with open('{output_file}', 'w') as f:
    f.write(str(layout_info))
"""

    def _get_suffix_special_for_graph(self, output_file):
        """Graph variant of _get_suffix: every axes is reported as a fixed
        1x1 layout since graph charts carry no gridspec information."""
        return f"""
def get_gridspec_layout_info(fig):
    layout_info = {{}}
    for ax in fig.axes:
        layout_info[ax] = dict(nrows=1, ncols=1, row_start=0, row_end=1, col_start=0, col_end=1)
    # print(layout_info)
    layout_info = list(layout_info.values())
    return layout_info

layout_info = get_gridspec_layout_info(fig=plt.gcf())
with open('{output_file}', 'w') as f:
    f.write(str(layout_info))
"""
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
if __name__ == "__main__":

    # Ad-hoc developer smoke test; requires PROJECT_PATH to point at a local
    # checkout with the referenced dataset/results files.
    evaluator = LayoutEvaluator()

    for idx in range(60, 61):
        print(f"Processing {idx}")
        # print("Processing Golden Code")
        golden_code_file = f"{os.environ['PROJECT_PATH']}/dataset/ori/line_{idx}.py"
        # print("Processing Generation Code")
        generation_code_file = f"{os.environ['PROJECT_PATH']}/results/chart2code_gpt_ScaffoldAgent_results/scaffold/line_{idx}.py"
        evaluator(generation_code_file, golden_code_file)
        print()
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/legend_evaluator.py
ADDED
|
@@ -0,0 +1,194 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa
|
| 2 |
+
from typing import List, Tuple
|
| 3 |
+
# from dotenv import load_dotenv
|
| 4 |
+
# load_dotenv()
|
| 5 |
+
|
| 6 |
+
import os
|
| 7 |
+
# sys.path.insert(0, os.environ["PROJECT_PATH"])
|
| 8 |
+
|
| 9 |
+
from ..eval_configs.global_config import run_script_safe
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class LegendEvaluator:
    """Compare the legend entries of a generated chart script against a golden one.

    Both scripts are executed with an instrumented matplotlib PDF renderer that
    records every drawn string; strings belonging to a legend are then matched
    between the two runs to fill ``self.metrics`` with precision / recall / F1.
    """

    def __init__(self, use_position=True) -> None:
        # When True, a match additionally requires the relative draw positions
        # of the two texts to be within 10 units of each other.
        self.use_position = use_position
        self.metrics = {
            "precision": 0,
            "recall": 0,
            "f1": 0
        }

    def __call__(self, generation_code_file, golden_code_file):
        """Run both scripts, collect their legend texts, and fill ``self.metrics``."""
        generation_texts = self._log_legends(generation_code_file)
        golden_texts = self._log_legends(golden_code_file)

        self._calculate_metrics(generation_texts, golden_texts)

    def _log_legends(self, code_file):
        """
        Get legend objects of the code.

        The script is wrapped with a prefix (patches the PDF renderer to log
        every drawn string) and a suffix (keeps only the strings that belong to
        a legend and dumps them to a side file), then executed.  Returns the
        logged ``(x, y, x_rel, y_rel, text)`` tuples.
        """
        with open(code_file, 'r') as f:
            lines = f.readlines()
            code = ''.join(lines)

        prefix = self._get_prefix()
        output_file = code_file.replace(".py", ".txt")
        suffix = self._get_suffix(output_file)
        code = prefix + code + suffix

        code_log_texts_file = code_file.replace(".py", "_log_legends.py")
        with open(code_log_texts_file, 'w') as f:
            f.write(code)

        success = run_script_safe(code_log_texts_file)
        if not success:
            print("Skip downstream logic due to previous failure.")

        # The instrumented run may have died before writing the output file;
        # fall back to "no legends" instead of crashing on open()
        # (consistent with TextEvaluator._log_texts).
        if os.path.exists(output_file):
            with open(output_file, 'r') as f:
                texts = f.read()
            texts = eval(texts)
            os.remove(output_file)
        else:
            texts = []

        os.remove(code_log_texts_file)

        return texts

    def _calculate_metrics(
            self,
            generation_texts: List[Tuple],
            golden_texts: List[Tuple]):
        """
        Calculate the metrics

        Args:
        - generation_texts: List of tuples of texts, [(x, y, x_rel, y_rel, text), ...]
        - golden_texts: List of tuples of texts, [(x, y, x_rel, y_rel, text), ...]
        """
        if len(generation_texts) == 0 or len(golden_texts) == 0:
            self.metrics["precision"] = 0
            self.metrics["recall"] = 0
            self.metrics["f1"] = 0
            return

        len_generation = len(generation_texts)
        len_golden = len(golden_texts)

        if not self.use_position:
            # Text-only matching; each generated text may be consumed once.
            generation_texts = [t[-1] for t in generation_texts]
            golden_texts = [t[-1] for t in golden_texts]

            n_correct = 0
            for t in golden_texts:
                if t in generation_texts:
                    n_correct += 1
                    generation_texts.remove(t)

        else:
            # Keep only (x_rel, y_rel, text); absolute coordinates are
            # layout-dependent.
            generation_texts = [t[2:] for t in generation_texts]
            golden_texts = [t[2:] for t in golden_texts]

            n_correct = 0
            for t1 in golden_texts:
                for t2 in generation_texts:
                    # text must be equal, but x_rel and y_rel can be in a range
                    if t1[-1] == t2[-1] and abs(t1[0] - t2[0]
                                                ) <= 10 and abs(t1[1] - t2[1]) <= 10:
                        n_correct += 1
                        generation_texts.remove(t2)
                        break

        self.metrics["precision"] = n_correct / len_generation
        self.metrics["recall"] = n_correct / len_golden
        if self.metrics["precision"] + self.metrics["recall"] == 0:
            self.metrics["f1"] = 0
        else:
            self.metrics["f1"] = 2 * self.metrics["precision"] * \
                self.metrics["recall"] / (self.metrics["precision"] + self.metrics["recall"])

        return

    def _get_prefix(self):
        """Build the instrumentation code prepended to the evaluated script."""
        sys_to_add = os.environ["VLMEVAL_CHARTMIMIC_UTILS_PATH"]
        # assert sys_to_add not empty
        assert sys_to_add != "", "VLMEVAL_CHARTMIMIC_UTILS_PATH is not set"
        # NOTE: `import sys` was missing here (TextEvaluator._get_prefix has
        # it), so the generated script raised NameError on `sys.path`.
        return f"""
import warnings
warnings.filterwarnings("ignore", category=UserWarning)

import sys
if "{sys_to_add}" not in sys.path:
    sys.path.insert(0, "{sys_to_add}")

import eval_configs.global_config as global_config
global_config.reset_texts()
from matplotlib.backends.backend_pdf import RendererPdf

drawed_legend_texts = []
drawed_texts = []

def log_function(func):
    def wrapper(*args, **kwargs):
        global drawed_texts

        object = args[0]
        x = args[2]
        y = args[3]
        x_rel = ( x / object.width / 72 ) * 100
        y_rel = ( y / object.height / 72 ) * 100
        s = args[4]

        drawed_texts.append( (x, y, x_rel, y_rel, s) )
        return func(*args, **kwargs)
    wrapper.__name__ = func.__name__
    return wrapper

RendererPdf.draw_text = log_function(RendererPdf.draw_text)
"""

    def _get_suffix(self, output_file):
        """Build the code appended to the evaluated script: keep the logged
        draw tuples whose text belongs to a legend and dump them to
        *output_file*."""
        return f"""

all_axes = plt.gcf().get_axes()
legends = [ax.get_legend() for ax in all_axes if ax.get_legend() is not None]
for legend in legends:
    for t in legend.get_texts():
        drawed_legend_texts.append(t.get_text())

new_drawed_legend_texts = []
for t1 in drawed_legend_texts:
    for t2 in drawed_texts:
        if t1 == t2[-1]:
            new_drawed_legend_texts.append(t2)
            break
drawed_legend_texts = new_drawed_legend_texts

with open('{output_file}', 'w') as f:
    f.write(str(drawed_legend_texts))
"""
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
if __name__ == "__main__":
    # Ad-hoc smoke test against the author's local dataset paths.
    # sys.path.insert(0, '/home/yc21/project/Princess-s-CHI')

    legend_eval = LegendEvaluator()

    gen_file = "/home/yc21/project/Princess-s-CHI/dataset/line/line_9.py"
    gold_file = "/home/yc21/project/Princess-s-CHI/results/chart2code_gpt_DirectAgent_results/direct/line_9.py"

    legend_eval(gen_file, gold_file)
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/text_evaluator.py
ADDED
|
@@ -0,0 +1,202 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa
|
| 2 |
+
from typing import List, Tuple
|
| 3 |
+
# from dotenv import load_dotenv
|
| 4 |
+
# load_dotenv()
|
| 5 |
+
|
| 6 |
+
import os
|
| 7 |
+
from ..eval_configs.global_config import run_script_safe
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class TextEvaluator:
    """Compare the texts drawn by a generated chart script against a golden one.

    Both scripts are executed with an instrumented matplotlib PDF renderer that
    records every drawn string; the recorded strings are matched between the
    two runs to fill ``self.metrics`` with precision / recall / F1.
    """

    def __init__(self, use_position=False, use_axs=True) -> None:
        self.metrics = {
            "precision": 0,
            "recall": 0,
            "f1": 0
        }
        # When True, matching also requires draw positions to be close.
        self.use_position = use_position
        # When False, axis tick labels are cleared before rendering so that
        # only non-tick texts are compared.
        self.use_axs = use_axs

    def __call__(self, generation_code_file, golden_code_file):
        """Run both scripts, collect their drawn texts, and fill ``self.metrics``."""
        generation_texts = self._log_texts(generation_code_file)
        golden_texts = self._log_texts(golden_code_file)

        self._calculate_metrics(generation_texts, golden_texts)

    def _log_texts(self, code_file):
        """
        Get text objects of the code.

        The script is wrapped with a prefix (patches the PDF renderer to log
        every drawn string) and a suffix (dumps the log to a side file), then
        executed.  Returns the logged ``(x, y, x_rel, y_rel, text)`` tuples.
        """
        with open(code_file, 'r') as f:
            lines = f.readlines()
            code = ''.join(lines)

        prefix = self._get_prefix()
        output_file = code_file.replace(".py", "_log_texts.txt")
        suffix = self._get_suffix(output_file)
        code = prefix + code + suffix

        if not self.use_axs:
            # Insert the tick-deletion code just before plt.savefig.  If the
            # script never calls plt.savefig, find() returns -1 and slicing
            # with it would corrupt the script, so skip the insertion.
            savefig_idx = code.find("plt.savefig")
            if savefig_idx != -1:
                ax_ticks_deletion_code = self._get_ax_ticks_deletion_code()
                code = code[:savefig_idx] + \
                    ax_ticks_deletion_code + code[savefig_idx:]

        code_log_texts_file = code_file.replace(".py", "_log_texts.py")
        with open(code_log_texts_file, 'w') as f:
            f.write(code)

        success = run_script_safe(code_log_texts_file)
        if not success:
            print("Skip downstream logic due to previous failure.")

        # A failed run may not have produced the output file.
        if os.path.exists(output_file):
            with open(output_file, 'r') as f:
                texts = f.read()
            texts = eval(texts)
            os.remove(output_file)
        else:
            texts = []
        os.remove(code_log_texts_file)

        return texts

    def _calculate_metrics(
            self,
            generation_texts: List[Tuple],
            golden_texts: List[Tuple]):
        """
        Calculate the metrics

        Args:
        - generation_texts: List of tuples of texts, [(x, y, x_rel, y_rel, text), ...]
        - golden_texts: List of tuples of texts, [(x, y, x_rel, y_rel, text), ...]
        """
        if len(generation_texts) == 0 or len(golden_texts) == 0:
            self.metrics["precision"] = 0
            self.metrics["recall"] = 0
            self.metrics["f1"] = 0
            return

        len_generation = len(generation_texts)
        len_golden = len(golden_texts)

        if not self.use_position:
            # Text-only matching; each generated text may be consumed once.
            generation_texts = [t[-1] for t in generation_texts]
            golden_texts = [t[-1] for t in golden_texts]

            n_correct = 0
            for t in golden_texts:
                if t in generation_texts:
                    n_correct += 1
                    generation_texts.remove(t)

        else:
            # Keep only (x_rel, y_rel, text); absolute coordinates are
            # layout-dependent.
            generation_texts = [t[2:] for t in generation_texts]
            golden_texts = [t[2:] for t in golden_texts]

            n_correct = 0
            for t1 in golden_texts:
                for t2 in generation_texts:
                    # text must be equal, but x_rel and y_rel can be in a range
                    if t1[-1] == t2[-1] and abs(t1[0] - t2[0]
                                                ) <= 10 and abs(t1[1] - t2[1]) <= 10:
                        n_correct += 1
                        generation_texts.remove(t2)
                        break

        self.metrics["precision"] = n_correct / len_generation
        self.metrics["recall"] = n_correct / len_golden
        if self.metrics["precision"] + self.metrics["recall"] == 0:
            self.metrics["f1"] = 0
        else:
            self.metrics["f1"] = 2 * self.metrics["precision"] * \
                self.metrics["recall"] / (self.metrics["precision"] + self.metrics["recall"])

        return

    def _get_prefix(self):
        """Build the instrumentation code prepended to the evaluated script."""
        sys_to_add = os.environ["VLMEVAL_CHARTMIMIC_UTILS_PATH"]
        # assert sys_to_add not empty
        assert sys_to_add != "", "VLMEVAL_CHARTMIMIC_UTILS_PATH is not set"
        return f"""
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)

import sys
if "{sys_to_add}" not in sys.path:
    sys.path.insert(0, "{sys_to_add}")

import eval_configs.global_config as global_config
global_config.reset_texts()
from matplotlib.backends.backend_pdf import RendererPdf

drawed_texts = []

def log_function(func):
    def wrapper(*args, **kwargs):
        global drawed_texts

        object = args[0]
        x = args[2]
        y = args[3]
        x_rel = ( x / object.width / 72 ) * 100
        y_rel = ( y / object.height / 72 ) * 100
        s = args[4]

        drawed_texts.append( (x, y, x_rel, y_rel, s) )
        return func(*args, **kwargs)
    wrapper.__name__ = func.__name__
    return wrapper

RendererPdf.draw_text = log_function(RendererPdf.draw_text)
"""

    def _get_suffix(self, output_file):
        """Build the code appended to the evaluated script: dump the logged
        draw tuples to *output_file*."""
        return f"""
# print("drawed_texts", drawed_texts)
with open('{output_file}', 'w') as f:
    f.write(str(drawed_texts))
"""

    def _get_ax_ticks_deletion_code(self):
        """Code that clears x/y tick labels on every axis (use_axs=False mode)."""
        return """
all_axes = plt.gcf().get_axes()
for ax in all_axes:
    ax.set_xticks([])
    ax.set_yticks([])
"""
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
if __name__ == "__main__":
    # Ad-hoc smoke test against the author's local dataset paths.
    # sys.path.insert(0, '/home/yc21/project/Princess-s-CHI')

    text_eval = TextEvaluator(use_axs=False)
    # text_eval = TextEvaluator()

    gen_file = "/home/yc21/project/Princess-s-CHI/dataset/line/line_7.py"
    gold_file = "/home/yc21/project/Princess-s-CHI/results/chart2code_gpt_DirectAgent_results/direct/line_7.py"

    text_eval(gen_file, gold_file)
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/crpe.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import argparse
|
| 3 |
+
from collections import defaultdict
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def is_correct(predict, answer):
    """Return True when the model prediction matches the ground truth.

    Per the original author's note: *predict* is the ground-truth answer and
    *answer* is the model prediction (the names are swapped).
    """
    option_letters = ['A', 'B', 'C', 'D']
    # Single-character predictions, or predictions that start with an option
    # letter, are compared by their leading character.
    if len(answer) == 1 or answer[0] in option_letters:
        return answer[0] == predict[0]
    # Free-form predictions: check (case-insensitively) whether the ground
    # truth — presumably minus an "X). "-style 4-char prefix; verify against
    # the dataset format — is contained in the prediction.
    return predict[4:].lower() in answer.lower()
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/mathverse.py
ADDED
|
@@ -0,0 +1,193 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ...smp import *
|
| 2 |
+
from ...utils import can_infer
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
FAIL_MSG = 'Failed to obtain answer via API.'
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def get_gpt4_extract_ICE():
    """Return the in-context examples that teach the judge model how to
    extract a final answer from a raw model response."""
    example_1 = """
1.
Model response: 'Rounded to two decimal places, the perimeter of the sector is approximately:\n\n(-2, 1)'
Extracted Answer: (-2, 1)
""" # noqa

    example_2 = """
2.
Model response: 'at those points.\n\nTherefore, the correct option that represents the meaning of the intersection points of the graphs is:\n\nD. They give the solutions to the equation $f(t)=g(t)$.",'
Extracted Answer: D
""" # noqa

    example_3 = """
3.
Model response: ' at 1 (there's a closed circle at y = 1), the range in interval notation is \\((-4, 1]\\).\n\nFinal values:\nDomain: \\((-3, 3]\\)\nRange: \\((-4, 1]\\)'
Extracted Answer: Domain: \\((-3, 3]\\)\nRange: \\((-4, 1]\\)
""" # noqa

    example_4 = """
4.
Model response: 'As it stands, I cannot provide the correct option letter because there isn't enough information to solve for 'y'.'
Extracted Answer: null
""" # noqa

    example_5 = """
5.
Model response: 'Given that AB = 17.6 meters, we can now substitute into the equation:\n\nd = 17.6 / cos(38\u00b0)\n\nTherefore, to one decimal place, the distance d between Ned and Bart is approximately 22.3 meters.'
Extracted answer: 22.3
""" # noqa

    example_6 = """
6.
Model response: have all the coefficients for the quadratic function:\n\\( f(x) = ax^2 + bx + c \\)\n\\( f(x) = -1x^2 - 2x + 1 \\)\n\nTherefore, the equation for the graphed function \\( f \\) is:\n\\( f(x) = -x^2 - 2x + 1 \\)"'
Extracted answer: f(x) = -x^2 - 2x + 1
""" # noqa

    return [example_1, example_2, example_3, example_4, example_5, example_6]
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def get_gpt4_score_ICE():
    """Return the in-context examples that teach the judge model how to
    score (0/1) an extracted answer against the standard answer."""
    example_1 = """
[Question]: Write the set of numbers represented on the number line in interval notation.
[Standard Answer]: (-2,1]
[Model_answer] : Extracted Answer: \\((-2, 1)\\)
Judgement: 0
""" # noqa

    example_2 = """
[Question]: As shown in the figure, circle O has a radius 1.0, if angle BAC = 60.0, then the length of BC is ()\nChoices:\nA:2\nB:2\u221a{{3}}\nC:\u221a{{3}}\nD:2\u221a{{2}}
[Standard Answer]: C
[Model_answer] : B:2\u221a{{3}}
Judgement: 0
""" # noqa

    example_3 = """
[Question]: Find the domain and range of the function f using interval notation.
[Standard Answer]: domain: [-4, 0) and range: (-3, 1]
[Model_answer] : Range: \\((-4, 1]\\)
Judgement: 0
""" # noqa

    example_4 = """
[Question]: As shown in the figure, circle O has a radius 1.0, if angle BAC = 60.0, then the length of BC is ()\nChoices:\nA:2\nB:2\u221a{{3}}\nC:\u221a{{3}}\nD:2\u221a{{2}}
[Standard Answer]: C
[Model_answer] : null
Judgement: 0
""" # noqa

    return [example_1, example_2, example_3, example_4]
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def build_mathverse_gpt4_extract_prompt(line):
    """Assemble the few-shot judge prompt that extracts the final answer
    from *line*'s raw model prediction."""
    task_description = """
I am providing you a response from a model to a math problem, termed 'Model Response'. You should extract the answer from the response as 'Extracted Answer'. Directly output the extracted answer with no explanation.\n\n
""" # noqa
    prediction = str(line['prediction'])
    # Task description, then the in-context examples, each followed by a
    # blank line, then the query as example number 7.
    demo_prompt = task_description + ''.join(
        example + '\n\n' for example in get_gpt4_extract_ICE()
    )
    test_prompt = f"Model response: '{prediction}'\nExtracted Answer: "
    return f'{demo_prompt}7.\n{test_prompt}'
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def build_mathverse_gpt4_score_prompt(line):
    """Assemble the few-shot judge prompt that decides (0/1) whether the
    extracted answer is consistent with the standard answer."""
    task_description = """
Below are two answers to a math question. Question is [Question], [Standard Answer] is the standard answer to the question, and [Model_answer] is the answer extracted from a model's output to this question. Determine whether these two answers are consistent.
Please note that only when the [Model_answer] completely matches the [Standard Answer] means they are consistent. For non-multiple-choice questions, if the meaning is expressed in the same way, it is also considered consistent, for example, 0.5m and 50cm.
If they are consistent, Judement is 1; if they are different, Judement is 0.\n\n
""" # noqa
    # Task description, then the in-context examples, then the query.
    demo_prompt = task_description + ''.join(
        example + '\n\n' for example in get_gpt4_score_ICE()
    )
    test_prompt = f"""
[Question]: {line['question_for_eval']}
[Standard Answer]: {line['answer']}
[Model_answer] : {line['extract']}
Judgement:"""
    return f'{demo_prompt}{test_prompt}'
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def post_check_score(line, prefetch=False):
    """Cheap exact-match check between the extracted and standard answers.

    Returns False on mismatch.  On an exact match, returns the extracted
    string when *prefetch* is set (truthy, usable as the answer itself) and
    True otherwise.
    """
    expected = str(line['answer']).strip()
    extracted = str(line['extract']).strip()

    if extracted != expected:
        return False
    return extracted if prefetch else True
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def MathVerse_auxeval_extract(model, line):
    """Ask the judge model to extract the final answer from *line*'s raw
    prediction.

    Retries up to 5 times with increasing temperature.  Returns a dict with
    the attempt log and the extracted answer ('' when every attempt failed).
    """
    prompt = build_mathverse_gpt4_extract_prompt(line)
    prediction = line['prediction']
    log = ''
    for attempt in range(5):
        res = model.generate(prompt, temperature=attempt * 0.5)

        if FAIL_MSG in res:
            log += f'Try {attempt}: output is {prediction}, failed to parse.\n'
        else:
            log += 'Succeed'
            return dict(log_extract=log, extract=res)
    log += 'All 5 retries failed.\n'
    return dict(log_extract=log, extract='')
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def MathVerse_auxeval_score(model, line):
    """Ask the judge model to score (0/1) the extracted answer against the
    standard answer.

    An exact string match short-circuits the model call.  Otherwise the judge
    is queried up to 5 times with increasing temperature and must answer with
    a literal '0' or '1'.  Returns a dict with the attempt log and a boolean
    score (False when every attempt failed).
    """
    prompt = build_mathverse_gpt4_score_prompt(line)
    log = ''
    retry = 5
    # Exact match needs no judge call.  (Previously post_check_score was
    # called twice here and the first result was discarded.)
    if post_check_score(line, prefetch=True):
        return dict(log_score='Prefetch succeed', score=True)
    for i in range(retry):
        prediction = line['prediction']
        res = model.generate(prompt, temperature=i * 0.5)

        if FAIL_MSG in res or res.strip() not in ['0', '1']:
            log += f'Try {i}: output is {prediction}, res is {res}, failed to parse.\n'
        else:
            log += 'Succeed'
            return dict(log_score=log, score=int(res) == 1)
    log += 'All 5 retries failed.\n'
    return dict(log_score=log, score=False)
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
def MathVerse_acc(result_file):
    """Aggregate judged MathVerse results into per-split accuracy tables.

    Args:
        result_file: path to the judged results file (read via ``load``).

    Returns:
        pd.DataFrame with one row per ``problem_version`` split and accuracy
        columns (percentages): overall, per subject, and per subfield.
    """
    df = load(result_file)

    # 'metadata' holds a stringified dict using single quotes; rewrite them to
    # double quotes so json.loads accepts it, then flatten its keys into
    # top-level columns.
    # NOTE(review): the blanket quote replacement breaks if a metadata value
    # contains an apostrophe — assumed not to happen; verify against the data.
    df['metadata'] = df['metadata'].apply(lambda x: x.replace("'", '"'))
    df['metadata'] = df['metadata'].apply(json.loads)
    df_metadata = pd.json_normalize(df['metadata'])
    df = pd.concat([df.drop('metadata', axis=1), df_metadata], axis=1)

    subset = list(set(df['problem_version']))

    res = defaultdict(list)
    for p in subset:
        if p != 'Overall':
            sub = df[df['problem_version'] == p]
        else:
            # 'Overall' rows (if present) cover the whole frame.
            sub = cp.deepcopy(df)
        res['split'].append(p)
        # Overall Acc
        res['Overall'].append(np.mean(sub['score']) * 100)
        # Subject
        subjects = set(df['subject'])
        for k in subjects:
            res[k].append(np.mean(sub[sub['subject'] == k]['score']) * 100)
        # Subfield
        subfields = set(df['subfield'])
        for k in subfields:
            res[k].append(np.mean(sub[sub['subfield'] == k]['score']) * 100)

    return pd.DataFrame(res)
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (2.14 kB). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/__pycache__/answer_str_parse.cpython-310.pyc
ADDED
|
Binary file (2.69 kB). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/__pycache__/dummy_parse.cpython-310.pyc
ADDED
|
Binary file (568 Bytes). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/__pycache__/json_parse.cpython-310.pyc
ADDED
|
Binary file (805 Bytes). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/common/__pycache__/parsers.cpython-310.pyc
ADDED
|
Binary file (3.19 kB). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/common/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (2.56 kB). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/common/parsers.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import ast
|
| 2 |
+
import json
|
| 3 |
+
import re
|
| 4 |
+
import regex # Supports the non-standard ?R regex operator
|
| 5 |
+
from typing import List
|
| 6 |
+
from .utils import extract_code_block_content, extract_answer_at_beginning_of_line
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
PARSING_TIMEOUT = 0.1
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def parse_json(response: str):
    """Parse the JSON object, including nested JSON strings.

    Scans *response* for JSON-looking spans and returns the LAST span that
    parses (via json.loads, with ast.literal_eval as a fallback).  When
    nothing parses, returns [] if the cleaned response starts with '[' and
    {} otherwise.
    """

    # Prefer an explicit answer line when the response format provides one.
    response_ = extract_answer_at_beginning_of_line(response)

    # If it's wrapped in code block like json, drop it
    response_, _ = extract_code_block_content(response_, "json")

    # Regular expression to match JSON-like structures, including nested quotes.
    # (?R) recursively matches balanced {...}/[...] — regex-module only.
    json_pattern = r"(\{(?:[^{}]|(?R))*\}|\[(?:[^{}]|(?R))*\])"
    string_pattern = r'"(?:\\.|[^"\\])*"'

    # Find all potential JSON objects
    try:
        potential_jsons = regex.findall(
            json_pattern, response_, timeout=PARSING_TIMEOUT
        )
    except TimeoutError:
        # Pathological inputs can blow up the recursive regex; bail out with
        # the type-appropriate empty container.
        if response_.startswith("["):
            return []
        return {}

    valid_jsons = []

    for potential_json in potential_jsons:
        # Replace escaped quotes with a placeholder
        potential_json = potential_json.replace('\\"', "__DOUBLE_QUOTE__")
        potential_json = potential_json.replace("\\'", "__SINGLE_QUOTE__")

        # Find all string literals
        strings = regex.findall(string_pattern, potential_json)

        # Process each string literal
        for s in strings:
            # Unescape the string content
            unescaped = (
                s[1:-1]
                .replace("__DOUBLE_QUOTE__", '"')
                .replace("__SINGLE_QUOTE__", "'")
            )
            # Try to parse it as JSON
            try:
                parsed = json.loads(unescaped)
                if isinstance(parsed, (dict, list)):
                    # If it's a valid JSON object or array, replace it in the original string
                    potential_json = potential_json.replace(s, json.dumps(parsed))
            except json.JSONDecodeError:
                pass

        # Restore escaped quotes
        potential_json = potential_json.replace("__DOUBLE_QUOTE__", '\\"')
        potential_json = potential_json.replace("__SINGLE_QUOTE__", "\\'")

        try:
            # Attempt to parse the potential JSON
            json_object = json.loads(potential_json)
            valid_jsons.append(json_object)
        except json.JSONDecodeError:
            # try to update single quote to double quote for some special failure case
            # caused by quote's type
            potential_json_ = re.sub(r"(?<!\w)\'|\'(?!\w)", '"', potential_json)
            try:
                json_object = json.loads(potential_json_)
                valid_jsons.append(json_object)
            except json.JSONDecodeError:
                # If parsing still fails, it's not a valid JSON object
                pass

        # NOTE(review): these ast fallbacks appear to run even after a
        # successful json.loads above, appending a duplicate entry; harmless
        # since only the last entry is returned — confirm this is intended.
        try:
            # Attempt to parse the Python structure
            valid_jsons.append(ast.literal_eval(potential_json))
            continue
        except (SyntaxError, ValueError):
            pass
        # Escape the backslashes.
        potential_json = potential_json.replace('\\"', '\\\\"')
        potential_json = potential_json.replace("\\'", "\\\\'")
        try:
            # Attempt to parse the Python structure
            valid_jsons.append(ast.literal_eval(potential_json))
        except (SyntaxError, ValueError):
            pass

    # Return the last valid JSON if any, otherwise an empty dict or list
    if valid_jsons:
        return valid_jsons[-1]
    if response_.startswith("["):
        return []
    return {}
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def parse_nested_str_list(input_string):
    """Parse a nested list of bare words, e.g. '[[a, b], [c]]' -> [['a', 'b'], ['c']].

    Each word token is wrapped in double quotes so the whole string becomes a
    valid Python literal; on failure the original string is returned.
    """
    # Turn every bare word into a quoted string so literal_eval can parse it.
    quoted = re.sub(r"(\w+)", r'"\1"', input_string)

    try:
        return ast.literal_eval(quoted)
    except (ValueError, SyntaxError) as e:
        print(f"Failed to convert string to Python object: {e}")
        return input_string
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def parse_syllable_ranges(input_str: str) -> List[List[int]]:
    """Convert a bunch of syllable ranges into a list of intervals.

    Examples:
        parse_syllable_ranges('[7,10][7, 10][5,7][5,7][7,10]')
        >>> [[7, 10], [7, 10], [5, 7], [5, 7], [7, 10]]
        parse_syllable_ranges('575 575')
        >>> [[5, 5], [7, 7], [5, 5], [0, 0], [5, 5], [7, 7], [5, 5]]
        parse_syllable_ranges('[11]5')
        >>> [[11, 11], [5, 5]]
    """

    def _to_interval(token):
        # Strip bracket delimiters; bare digits/spaces are unaffected.
        token = token.strip("[]")
        if "," in token:
            lo, hi = (int(part) for part in token.split(","))
            return [lo, hi]
        if token == " ":
            # A separating space is encoded as the [0, 0] interval.
            return [0, 0]
        value = int(token)
        return [value, value]

    # Tokenize into bracketed ranges, single digits, and spaces.
    tokens = re.findall(r"(?:\[\d+(?:,\s*\d+)?\]|\d| )", input_str.strip())
    return [_to_interval(token) for token in tokens]
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/common/utils.py
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
import ast
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def extract_code_block_content(
    response,
    code_type=None,
    is_ascii_art: bool = False,
    should_remove_surrounding_whitespace=True,
):
    """Extract the contents of a fenced ``` code block from *response*.

    Returns a ``(content, found)`` tuple; when no block is found the original
    response is returned with ``found == False``.
    """
    if code_type:
        # Fence tagged with a specific language.
        pattern = rf"```{code_type}\s*\n*(.*?)\s*```"
    elif is_ascii_art:
        # Pure-whitespace or extremely long responses: trim trailing whitespace
        # before matching to keep the regex engine tractable.
        if not response.strip() or len(response) > 10000:
            response = response.rstrip()
        pattern = (
            r"```\w*(?:\s*\n+)?(.*?)\s*```"
            if should_remove_surrounding_whitespace
            else r"```\w*(?:\s*\n+)?(.*?)(?:\n+\s*)?```"
        )
    else:
        # Any fenced block, regardless of language tag.
        pattern = r"```\w*\s*\n*(.*?)\s*```"

    found = re.search(pattern, response, flags=re.DOTALL)
    if found is None:
        # No code block: hand back the original string unchanged.
        return response, False

    inner = found.group(1)
    # ASCII art keeps its internal/leading whitespace layout intact.
    return (inner if is_ascii_art else inner.strip()), True
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def extract_answer_content(
    response, is_ascii_art=False, should_remove_surrounding_whitespace=True
):
    """Extract the text that follows an "Answer:" (or "**Answer:**") marker.

    ASCII-art answers keep their internal layout; other answers are stripped.
    When no marker is present the response is returned as-is.
    """
    # ASCII art must not have leading whitespace consumed by the regex.
    pattern = (
        r"\*\*?Answer:(.*?)\*\*?|\bAnswer:(.*)"
        if is_ascii_art
        else r"\*\*?Answer:\s*(.*?)\*\*?|\bAnswer:\s*(.*)"
    )
    match = re.search(pattern, response, re.DOTALL)
    if match:
        # The bolded form fills group 1, the plain form group 2.
        response = match.group(1) or match.group(2)
        if response is None:
            response = ""
        if is_ascii_art:
            # Collapse runs of blank lines into nothing.
            response = re.sub(r"^\s*$(\n^\s*$)+", "", response, flags=re.MULTILINE)

            if should_remove_surrounding_whitespace:
                response = response.rstrip()
            else:
                # Drop trailing then leading blank lines, keeping indentation.
                response = re.sub(r"(\n\s*)+$", "", response)
                response = re.sub(r"^(\s*\n)+", "", response)
        else:
            response = response.strip()

    return response
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def extract_answer_at_beginning_of_line(response):
    """Extract the text after an "Answer:" / "**Answer:" marker at line start.

    Falls back to the stripped response when no marker begins a line.
    """
    # MULTILINE so ^ anchors at every line start, not just the string start.
    marker = re.search(r"^(?:\*\*Answer:|Answer:)\s*(.+)", response, re.MULTILINE)
    return marker.group(1).strip() if marker else response.strip()
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def drop_additional_text(result):
    """Keep only the first paragraph when it looks like a short choice answer.

    Heuristic to catch multiple-choice queries. Does not use metadata.json.
    """
    first_paragraph = result.split("\n\n")[0].strip()

    # A single token, comma-separated tokens, "A.", or "(A)" at the start.
    looks_like_choice = re.search(
        r"^(?:(?:[a-zA-Z0-9_-]+)(?:,\s*[a-zA-Z0-9_-]+)*|(?:[a-zA-Z0-9_-]+)\.|\((?:[a-zA-Z0-9_-]+)\)$)",
        first_paragraph,
    )

    # Only drop the rest when the answer itself is a single line.
    if looks_like_choice and first_paragraph != "" and not _is_multiline_answer(result):
        return first_paragraph
    return result
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def _is_multiline_answer(text):
|
| 107 |
+
# Split the text into lines
|
| 108 |
+
lines = text.splitlines()
|
| 109 |
+
|
| 110 |
+
# Find the "Answer:" line
|
| 111 |
+
for i, line in enumerate(lines):
|
| 112 |
+
stripped_line = line.strip()
|
| 113 |
+
if stripped_line != "":
|
| 114 |
+
# Check if the next line (second line after "Answer:") is blank
|
| 115 |
+
if i + 1 < len(lines) and lines[i + 1].strip() == "":
|
| 116 |
+
return False # Second line is blank, single-line answer,
|
| 117 |
+
# remaining parts are additional
|
| 118 |
+
return True # Second line is not blank, multi-line answer
|
| 119 |
+
|
| 120 |
+
return False # empty result found, treat as single-line
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def evaluate_as_string(s):
    """Unquote *s* if it is a quoted Python string literal; otherwise return it.

    Non-literal input comes back unchanged; a MemoryError during evaluation
    (overflowing literal) yields the empty string.
    """
    try:
        evaluated = ast.literal_eval(s)
    except (ValueError, SyntaxError):
        # Not a valid Python literal: keep the original input.
        return s
    except MemoryError:
        # The result overflows; simply return an empty string.
        return ""
    # Only actual string literals are unquoted; other literals stay as text.
    return evaluated if isinstance(evaluated, str) else s
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/ascii_art_gpt4o_judge.py
ADDED
|
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Return if two ASCII art images depict the same thing."""
|
| 2 |
+
|
| 3 |
+
import logging
|
| 4 |
+
from numbers import Number
|
| 5 |
+
import os
|
| 6 |
+
import requests
|
| 7 |
+
from .common.conversions import ascii_text_to_image
|
| 8 |
+
from .vlm_as_judge import OpenAIVLMJudger
|
| 9 |
+
"""Return if two ASCII art images depict the same thing."""
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class AsciiArtGPT4OJudge(OpenAIVLMJudger):
    """A GPT-4o judge for assessing if two ASCII art images depict the same thing."""

    def __init__(self, metric_config, model="gpt-4o-2024-08-06"):
        # Fixed yes/no prompt; the two rendered ASCII-art images are attached
        # before it in prepare_eval_prompt().
        self.eval_prompt = """Determine if the following two ASCII art images depict the same object.
Your answer should be either "yes" or "no", but without the quotation marks."""
        super().__init__(
            metric_config,
            model,
        )

    def encode_image(self, image):
        """Encode an image into base64 and return its mime type."""
        mime_type = "image/jpeg"
        image_format = "JPEG"

        # JPEG has no alpha channel, so flatten RGBA first.
        if image.mode == "RGBA":
            image = self._rgba_to_rgb(image)

        # self.resize / self.max_side are configured by OpenAIVLMJudger.
        if self.resize and max(image.size) > self.max_side:
            image = self._resize_image(image)
        encoded_image = self._encode_image(image, image_format)

        return encoded_image, mime_type

    def create_image_content(self, image):
        """Build an OpenAI-style image_url message part from a PIL image."""
        base64_image, mime_type = self.encode_image(image)
        return {
            "type": "image_url",
            "image_url": {"url": f"data:{mime_type};base64,{base64_image}"},
        }

    def prepare_eval_prompt(self, images):
        """Prepare the evaluation prompt: all images followed by the question."""
        content = [self.create_image_content(image) for image in images]
        content.append({"type": "text", "text": self.eval_prompt})
        return content

    def query(self, images):
        """Query GPT4o to determine if the ASCII images show the same thing.

        Retries indefinitely on connection/decoding failures and API-level
        errors; returns the judge's raw text answer, or "" when a success
        response is missing choices/message.
        """
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.api_key}",
        }

        context = self.prepare_eval_prompt(images)

        query_payload = {
            "model": self.model,
            "messages": [{"role": "user", "content": context}],
            "temperature": 0.0,
        }

        response_data = None
        while response_data is None:
            try:
                response = requests.post(
                    self.url,
                    headers=headers,
                    json=query_payload,
                )
                # Fix: parse inside the try -- .json() raises JSONDecodeError
                # on a non-JSON body, which this except was meant to catch but
                # previously could not (the call sat outside the try block).
                response_ = response.json()
            except (requests.exceptions.JSONDecodeError, requests.exceptions.ConnectionError) as e:
                print(f'Error in requests: {e}')
                print('Retry...')
                continue

            if "error" in response_:
                error_info = response_["error"]
                print(
                    f"Got error with type: {error_info['type']}. Message: {error_info['message']}"
                )
                print("Retry...")
            else:
                response_data = response_

        total_tokens = response_data.get("usage", {}).get("total_tokens", "N/A")

        # Fix: default to "" so a success response with empty/malformed
        # choices no longer raises UnboundLocalError at the return below.
        message_content = ""
        if response_data and "choices" in response_data:
            choices = response_data["choices"]
            if choices and "message" in choices[0]:
                message_content = choices[0]["message"]["content"]
                print(
                    f"gpt-4o judge results: {message_content}; tokens:{total_tokens}"
                )
        else:
            print("gpt-4o judge query failed...")

        return message_content
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
class AsciiArtVLMJudgeScore:
    """Binary score: ask a GPT-4o judge whether two ASCII art pieces match.

    (The previous docstrings here said "cosine similarity", which does not
    match the implementation -- the score is 1/0 from a yes/no VLM judge.)
    """

    def __init__(self, metric_config):
        # GPT-4o judge configured from the task's metric config.
        self.model = AsciiArtGPT4OJudge(metric_config)

    def match(self, response, correct_answer) -> Number:
        """Render both ASCII strings as 224x224 images and query the judge.

        Returns 1 when the judge answers "yes", otherwise 0; non-string or
        empty responses score 0 without querying.
        """
        if not isinstance(response, str) or not isinstance(correct_answer, str):
            return 0
        if not response:
            return 0
        response_image = ascii_text_to_image(response, 224, 224)
        correct_answer_image = ascii_text_to_image(correct_answer, 224, 224)

        eval_results = self.model.query([response_image, correct_answer_image])
        # Substring check tolerates extra wording around the judge's "yes".
        return 1 if "yes" in eval_results.lower() else 0
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/common/conversions.py
ADDED
|
@@ -0,0 +1,244 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import ast
|
| 2 |
+
import json
|
| 3 |
+
import re
|
| 4 |
+
from matplotlib import font_manager
|
| 5 |
+
from PIL import Image, ImageDraw, ImageFont
|
| 6 |
+
from ...parsing.common.parsers import parse_json
|
| 7 |
+
from numbers import Number
|
| 8 |
+
from typing import Tuple, Union
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def freeze_structure(obj):
    """Recursively convert *obj* into a hashable equivalent.

    Dicts become frozensets of (key, frozen value) pairs, lists/tuples become
    tuples, sets become frozensets; everything else is returned unchanged.
    """
    if isinstance(obj, dict):
        return frozenset((key, freeze_structure(value)) for key, value in obj.items())
    if isinstance(obj, (list, tuple)):
        return tuple(freeze_structure(element) for element in obj)
    if isinstance(obj, set):
        return frozenset(obj)
    return obj
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def cast_to_set(object) -> set:
    """Try to cast an object as a set."""
    # Freeze first so nested lists/dicts become hashable set members.
    object = freeze_structure(object)
    if isinstance(object, (frozenset, set, tuple)):
        return set(object)
    # Anything else (typically a string) is parsed as an iterable literal.
    return str_to_set(object)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def cast_to_dict(object) -> dict:
    """Try to cast an object as a dict."""
    if isinstance(object, dict):
        # Recurse so string values nested inside the dict are parsed too.
        return {key: cast_to_dict(val) for key, val in object.items()}
    elif isinstance(object, str):
        # Attempt to interpret the string as embedded JSON.
        extract_json_attempt = parse_json(object)
        if extract_json_attempt:
            return extract_json_attempt
        return object
    else:
        # NOTE(review): despite the ``-> dict`` annotation, non-dict inputs
        # are returned unchanged rather than coerced.
        return object
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def str_to_iterable(func, iterable_str):
    """Converts a string representation of an iterable to an iterable.

    *func* is the target constructor (``set``, ``list``, ...). Non-string or
    empty input yields an empty ``func()``; a string with a mismatched
    opening/closing delimiter also yields ``func()``.
    """
    if not isinstance(iterable_str, str):
        return func()

    iterable_str = iterable_str.strip(" ")
    if not iterable_str:
        return func()

    # Detect whether the string is wrapped in (), {}, or []; a wrapper with a
    # mismatched closer is rejected outright.
    is_in_iterable = True
    if iterable_str[0] == "(":
        if not iterable_str.endswith(")"):
            return func()
    elif iterable_str[0] == "{":
        if not iterable_str.endswith("}"):
            return func()
    elif iterable_str[0] == "[":
        if not iterable_str.endswith("]"):
            return func()
    else:
        is_in_iterable = False

    # We may have a nested object, so try to use eval first
    try:
        eval_ = ast.literal_eval(iterable_str)
        if eval_ is None:
            # NOTE(review): "None" deliberately maps to "" (not func()) --
            # confirm downstream callers rely on this string sentinel.
            return ""
        if isinstance(eval_, (int, float)):
            # A bare scalar becomes a one-element iterable.
            eval_ = [
                eval_,
            ]
        return func(eval_)
    except (SyntaxError, ValueError):
        # Not a Python literal: fall back to comma-splitting the (unwrapped)
        # string, stripping whitespace from each element.
        if is_in_iterable:
            iterable_str = iterable_str[1:-1]
        items = [item.strip() for item in iterable_str.split(",")]
        return func(items)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def str_to_set(iterable_str) -> set:
    """Converts a string representation of an iterable to a set."""
    # Thin wrapper: delegate parsing with ``set`` as the constructor.
    return str_to_iterable(set, iterable_str)
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def str_to_list(iterable_str) -> list:
    """Converts a string representation of an iterable to a list.

    (Annotation and docstring previously said "set" -- this wrapper passes
    ``list`` as the constructor and returns a list.)
    """
    return str_to_iterable(list, iterable_str)
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def str_to_bboxes(bbox_list) -> list:
    """Parse a string holding one bounding box or a list of bounding boxes.

    Accepts Python or JSON literal syntax. A single flat 4-number box is
    promoted to a one-element list. Entries that are not 4-number sequences
    are dropped. Returns [] for non-string or unparsable input.
    """
    if not isinstance(bbox_list, str):
        return []
    try:
        bboxes = ast.literal_eval(bbox_list)
    except (SyntaxError, ValueError):
        try:
            bboxes = json.loads(bbox_list)
        except json.JSONDecodeError:
            return []

    # Fix: reject non-sequences before indexing -- a parsed dict with four
    # keys previously reached ``bboxes[0]`` and raised KeyError.
    if not isinstance(bboxes, (tuple, list)):
        return []

    # Promote a single flat box like "[1, 2, 3, 4]" to a list of one box.
    if len(bboxes) == 4 and isinstance(bboxes[0], Number):
        bboxes = [bboxes]

    new_bboxes = []
    for bbox in bboxes:
        if not isinstance(bbox, (tuple, list)) or len(bbox) != 4:
            continue
        if any(not isinstance(coord, (float, int)) for coord in bbox):
            continue
        new_bboxes.append(bbox)
    return new_bboxes
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def str_to_coords(coord_list, dim=2) -> list:
    """Parse a string holding a list of *dim*-dimensional numeric coordinates.

    Accepts Python or JSON literal syntax; entries that are not *dim*-length
    numeric sequences are dropped. Returns [] for non-string, unparsable, or
    non-sequence input.
    """
    if not isinstance(coord_list, str):
        return []
    try:
        coords = ast.literal_eval(coord_list)
    # Fix: literal_eval also raises ValueError on non-literal input (e.g.
    # bare identifiers); previously only SyntaxError was caught and the
    # ValueError escaped to the caller.
    except (SyntaxError, ValueError):
        try:
            coords = json.loads(coord_list)
        except json.JSONDecodeError:
            return []

    # Fix: guard against scalar results ("5") that would crash the for loop.
    if not isinstance(coords, (tuple, list)):
        return []

    new_coords = []
    for coord in coords:
        if not isinstance(coord, (tuple, list)) or len(coord) != dim:
            continue
        if any(not isinstance(component, (float, int)) for component in coord):
            continue
        new_coords.append(coord)
    return new_coords
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def parse_point_2d_from_xml(xml_string) -> Union[Tuple[float, float], None]:
    """Parse an (x, y) point from XML formatted like this: <point>x, y</point>

    Returns None for non-string input, zero or multiple <point> tags, or a
    payload that is not exactly two numbers.
    """
    if not isinstance(xml_string, str):
        return None

    matches = re.findall(r"<point>(.*?)<\/point>", xml_string)
    # Ambiguous input: more than one point means no unique answer.
    if len(matches) >= 2:
        return None
    if not matches:
        return None

    parts = matches[0].split(",")
    if len(parts) != 2:
        return None
    try:
        return tuple(float(part.strip()) for part in parts)
    except ValueError:
        return None
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
def parse_bboxes_from_xml(xml_string: str) -> list:
    """Extract every 4-float bounding box wrapped in <box>...</box> tags.

    Payloads that are not four comma-separated numbers are skipped; returns
    [] for non-string input.
    """
    if not isinstance(xml_string, str):
        return []

    boxes = []
    for raw in re.findall(r"<box>(.*?)<\/box>", xml_string):
        pieces = raw.split(",")
        if len(pieces) != 4:
            continue
        try:
            candidate = tuple(float(piece.strip()) for piece in pieces)
        except ValueError:
            # Non-numeric payload: skip this box.
            continue
        if len(candidate) == 4 and all(isinstance(value, float) for value in candidate):
            boxes.append(candidate)

    return boxes
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
# Candidate monospace fonts, in preference order; ASCII art only renders
# with its layout preserved under a fixed-width face.
MONOSPACE_FONTS = ("Courier New", "DejaVu Sans Mono", "Consolas", "SF Mono")

# Resolved font file paths for whichever candidates are installed locally.
# fallback_to_default=False makes findfont raise instead of silently
# substituting a proportional font; missing fonts are simply skipped.
MONOSPACE_FONT_FILES = []
for font_name in MONOSPACE_FONTS:
    try:
        MONOSPACE_FONT_FILES.append(
            font_manager.findfont(font_name, fallback_to_default=False)
        )
    except ValueError:
        continue
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
def ascii_text_to_image(
    text,
    width,
    height,
    font_size=20,
    padding=10,
    line_spacing=1,
    bg_color="white",
    text_color="black",
):
    """Convert ASCII text into a PIL image of size (width, height).

    The text is first rendered at its natural size with a monospace font,
    then resized to the requested dimensions.

    Raises:
        ValueError: if no monospace font is available.
    """
    # Split the text into lines
    lines = text.splitlines()
    # Fix: empty text produced no lines, and max() over an empty sequence
    # raised ValueError; render a single blank line instead.
    if not lines:
        lines = [""]

    # Calculate initial image size based on text
    char_width = font_size * 0.6  # Approximate width of a monospace character
    init_width = int(max(len(line) for line in lines) * char_width + 2 * padding)
    init_height = int((len(lines) * font_size * line_spacing) + 2 * padding)

    # Create a new image with the calculated size
    image = Image.new("RGB", (init_width, init_height), color=bg_color)
    draw = ImageDraw.Draw(image)

    # Load the first available monospace font.
    font = None
    for font_name in MONOSPACE_FONT_FILES:
        try:
            font = ImageFont.truetype(font_name, font_size)
            break
        except IOError:
            continue
    if font is None:
        raise ValueError("Cannot properly render ASCII art: missing monospace font.")

    # Draw each line of text
    y_text = padding
    for line in lines:
        draw.text((padding, y_text), line, font=font, fill=text_color)
        y_text += font_size * line_spacing  # Move to the next line

    # Resize the image to the specified dimensions
    image = image.resize((width, height), Image.Resampling.LANCZOS)

    return image
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/common/metrics.py
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections.abc import Iterable
|
| 2 |
+
import math
|
| 3 |
+
from numbers import Number
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def calculate_iou(predicted: Iterable[Number], target: Iterable[Number]):
    """For each predicted box, return its best IoU against any target box.

    Boxes are (x1, y1, x2, y2); a predicted box with no overlapping target
    (or an empty target list) scores 0.
    """

    def _area(box):
        return (box[2] - box[0]) * (box[3] - box[1])

    def _iou(box_a, box_b):
        # Intersection rectangle, clamped to zero when the boxes are disjoint.
        left = max(box_a[0], box_b[0])
        top = max(box_a[1], box_b[1])
        right = min(box_a[2], box_b[2])
        bottom = min(box_a[3], box_b[3])
        overlap = max(0, right - left) * max(0, bottom - top)

        union = _area(box_a) + _area(box_b) - overlap
        return overlap / union if union > 0 else 0

    # Best match per predicted box; default 0 covers an empty target list.
    return [
        max((_iou(pred_box, target_box) for target_box in target), default=0)
        for pred_box in predicted
    ]
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def set_relevance_score(denominator_fn, predicted: Iterable, target: Iterable) -> float:
    """Size of the predicted/target overlap divided by ``denominator_fn``'s value.

    A zero/empty denominator (e.g. both sets empty) scores a perfect 1.
    """
    predicted_set, target_set = set(predicted), set(target)
    denominator = denominator_fn(predicted_set, target_set)
    return 1 if not denominator else len(predicted_set & target_set) / denominator
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def _union_denominator(pred: set, tget: set) -> int:
    """Denominator for the Jaccard index: size of the union."""
    return len(pred | tget)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def _pred_denominator(pred: set, _: set) -> int:
    """Denominator for precision: size of the predicted set."""
    return len(pred)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def _tget_denominator(_: set, tget: set) -> int:
    """Denominator for recall: size of the target set."""
    return len(tget)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def jaccard_index(predicted: Iterable, target: Iterable) -> float:
    """Calculate the Jaccard Index: |pred & tget| / |pred | tget|.

    Scores 1 when both inputs are empty (empty denominator).
    """
    return set_relevance_score(_union_denominator, predicted, target)
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def set_precision(predicted: Iterable, target: Iterable) -> float:
    """Calculate the precision, using sets: |pred & tget| / |pred|.

    Scores 1 when the predicted set is empty (empty denominator).
    """
    return set_relevance_score(_pred_denominator, predicted, target)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def set_recall(predicted: Iterable, target: Iterable) -> float:
    """Calculate the recall, using sets: |pred & tget| / |tget|.

    Scores 1 when the target set is empty (empty denominator).
    """
    return set_relevance_score(_tget_denominator, predicted, target)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def longest_common_prefix(list1: list, list2: list) -> list:
    """Return the longest common prefix of two lists."""
    limit = min(len(list1), len(list2))
    cut = limit
    for position in range(limit):
        if list1[position] != list2[position]:
            # First mismatch: the prefix ends here.
            cut = position
            break
    return list1[:cut]
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def mse(predicted: Number, target: Number) -> Number:
    """Return the mean squared error.

    For a single prediction/target pair this is simply the squared error;
    averaging over multiple pairs is the caller's responsibility.
    """
    return (predicted - target) ** 2
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def point_distance(predicted: tuple[float, ...], target: tuple[float, ...]):
    """Return the Euclidean distance between two equal-dimension points.

    Raises:
        ValueError: when the points have different dimensions.
    """
    if len(predicted) != len(target):
        raise ValueError(
            "point_distance: Predicted and target points have different dimensions."
        )
    squared_deltas = sum(
        (pred_comp - tgt_comp) ** 2 for pred_comp, tgt_comp in zip(predicted, target)
    )
    return math.sqrt(squared_deltas)
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/common/transformations.py
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Like-to-like data transformations."""
|
| 2 |
+
|
| 3 |
+
import re
|
| 4 |
+
import unicodedata
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def remove_def_indef_articles(text: str) -> str:
    """Remove definite and indefinite articles.

    Fix: "an" is an indefinite article too and was previously left in place.
    Comparison is case-insensitive; remaining words are rejoined with spaces.
    """
    articles = {"the", "a", "an"}
    text_list = [t for t in text.split(" ") if t.lower() not in articles]
    return " ".join(text_list)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def replace_macrons_with_latex_overline(text: str) -> str:
    """Replace letters carrying a macron with LaTeX ``\\overline{...}``.

    Handles both precomposed characters (e.g. U+0101) and a base character
    followed by the combining macron U+0304.
    """
    result = []
    for char in text:
        if char.isalpha():
            # NFD splits a precomposed letter into base + combining marks.
            decomposed = unicodedata.normalize("NFD", char)
            if len(decomposed) > 1 and decomposed[1] == "\u0304":  # Macron accent
                result.append(f"\\overline{{{decomposed[0]}}}")
            else:
                result.append(char)
        elif char != "\u0304":
            result.append(char)
        elif result:
            # Standalone combining macron: wrap the preceding character.
            result[-1] = f"\\overline{{{result[-1]}}}"
        # Fix: a combining macron with nothing before it is dropped
        # (previously ``result[-1]`` raised IndexError).

    return "".join(result)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def fix_overline_underscores(text: str) -> str:
    r"""Move underscore subscripts that trail \overline{...} inside the braces."""
    # \overline{X}_Y  ->  \overline{X_Y}
    return re.sub(
        r"\\overline\{([^}]*)\}_([^{}\\ ]*)",
        r"\\overline{\1_\2}",
        text,
    )
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
# Dictionary mapping Unicode Greek letters to LaTeX equivalents.
# NOTE(review): several entries are not standard LaTeX commands
# (\omicron, \Alpha, \Beta, \Epsilon, \Zeta, \Eta, \Iota, \Kappa, \Mu,
# \Nu, \Omicron, \Rho, \Tau, \Chi) -- confirm the downstream renderer
# defines them, e.g. uppercase Alpha is plain "A" in standard LaTeX.
# NOTE(review): final sigma "ς" (U+03C2) has no entry here.
greek_to_latex = {
    # Lowercase Greek letters
    "α": "\\alpha",
    "β": "\\beta",
    "γ": "\\gamma",
    "δ": "\\delta",
    "ε": "\\epsilon",
    "ζ": "\\zeta",
    "η": "\\eta",
    "θ": "\\theta",
    "ι": "\\iota",
    "κ": "\\kappa",
    "λ": "\\lambda",
    "μ": "\\mu",
    "ν": "\\nu",
    "ξ": "\\xi",
    "ο": "\\omicron",
    "π": "\\pi",
    "ρ": "\\rho",
    "σ": "\\sigma",
    "τ": "\\tau",
    "υ": "\\upsilon",
    "φ": "\\phi",
    "χ": "\\chi",
    "ψ": "\\psi",
    "ω": "\\omega",
    # Uppercase Greek letters
    "Α": "\\Alpha",
    "Β": "\\Beta",
    "Γ": "\\Gamma",
    "Δ": "\\Delta",
    "Ε": "\\Epsilon",
    "Ζ": "\\Zeta",
    "Η": "\\Eta",
    "Θ": "\\Theta",
    "Ι": "\\Iota",
    "Κ": "\\Kappa",
    "Λ": "\\Lambda",
    "Μ": "\\Mu",
    "Ν": "\\Nu",
    "Ξ": "\\Xi",
    "Ο": "\\Omicron",
    "Π": "\\Pi",
    "Ρ": "\\Rho",
    "Σ": "\\Sigma",
    "Τ": "\\Tau",
    "Υ": "\\Upsilon",
    "Φ": "\\Phi",
    "Χ": "\\Chi",
    "Ψ": "\\Psi",
    "Ω": "\\Omega",
}
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def replace_greek_letters(text: str) -> str:
    """Replace Unicode Greek letters with their LaTeX commands.

    A trailing space is appended after each command so it does not fuse with
    the following characters.  Fix: the character class ``[α-ωΑ-Ω]`` also
    matches final sigma ``ς`` (U+03C2), which is absent from
    ``greek_to_latex`` and previously raised ``KeyError``; it is now mapped
    to ``\\sigma``.  Other matched-but-unmapped code points are left as-is
    instead of crashing.
    """

    def _latex(match):
        char = match.group()
        if char == "ς":  # final sigma: same letter as σ, not in the table
            return "\\sigma "
        return greek_to_latex.get(char, char) + " "

    return re.sub(r"[α-ωΑ-Ω]", _latex, text)
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def remove_latex_math_delimiters(latex_str):
    """Strip LaTeX math-mode delimiters, keeping the content.

    Removes ``\\begin{...}``/``\\end{...}`` commands (with an optional
    ``[...]`` argument), inline ``\\( \\)`` and display ``\\[ \\]`` markers.
    """
    for pattern in (
        r"\\(begin|end)\{.*?\}(?:\[[^\[\]]*\])?",  # environment commands
        r"\\\(|\\\)",  # inline math markers
        r"\\\[|\\\]",  # display math markers
    ):
        latex_str = re.sub(pattern, "", latex_str)
    return latex_str
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def normalize_latex(text: str) -> str:
    """Normalize a LaTeX expression via a fixed pipeline of rewrites."""
    # \bar and \overline are treated as the same command.
    text = text.replace("\\bar", "\\overline")
    pipeline = (
        replace_macrons_with_latex_overline,
        fix_overline_underscores,
        replace_greek_letters,
        remove_latex_math_delimiters,
    )
    for step in pipeline:
        text = step(text)
    return text
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/dict_equality.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .common.conversions import cast_to_dict
|
| 2 |
+
from .simple_str_match import ExactStrMatch
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class DictEquality:
    """All-or-nothing dict comparison.

    Casts both sides to dicts and scores 1 only when they are exactly equal.
    """

    @classmethod
    def match(cls, responses, targets) -> float:
        """Return 1 if the parsed response dict equals the target dict, else 0."""
        responses = cast_to_dict(responses)
        targets = cast_to_dict(targets)

        if not isinstance(responses, dict):
            # The response could not be interpreted as a dict at all.
            return 0

        return int(responses == targets)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class DictPrecision:
    """Per-key precision: exactly matched keys / number of predicted keys."""

    @classmethod
    def match(cls, responses, targets) -> float:
        """Return the fraction of response entries that exactly match targets."""
        responses = cast_to_dict(responses)
        targets = cast_to_dict(targets)

        if not isinstance(responses, dict) or not responses:
            # Unparseable or empty prediction scores zero.
            return 0

        matched = sum(
            1
            for key, val in responses.items()
            if key in targets and ExactStrMatch.match(val, targets[key])
        )
        return matched / len(responses)
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/dict_exact_match_agg_recall.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .common.conversions import cast_to_dict
|
| 2 |
+
from .exact_str_match import ExactStrMatch
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class DictExactStrMatchAggRecall:
    """Per-key recall: exact matches over the keys of the solution dict.

    1. Scores an exact match for every key in the solution.
    2. Returns the total divided by the number of solution keys.
    """

    @classmethod
    def match(cls, responses, targets) -> float:
        """Return the average exact-match score over the target keys."""
        responses = cast_to_dict(responses)
        targets = cast_to_dict(targets)

        if not isinstance(responses, dict):
            return 0
        if not targets:
            # Guard: an empty solution dict previously raised
            # ZeroDivisionError; treat it as no credit.
            return 0

        total_score = sum(
            ExactStrMatch.match(responses.get(key), answer)
            for key, answer in targets.items()
        )
        return total_score / len(targets)
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/general_numerical_match.py
ADDED
|
@@ -0,0 +1,253 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
from .simple_str_match import SimpleStrMatch
|
| 3 |
+
|
| 4 |
+
from sympy.parsing.latex import parse_latex
|
| 5 |
+
import math
|
| 6 |
+
import multiprocessing
|
| 7 |
+
|
| 8 |
+
import signal
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class TimeoutException(Exception):
    """Raised by the SIGALRM handler when evaluation exceeds its time budget."""


def timeout_handler(signum, frame):
    """Signal handler that aborts the in-progress evaluation."""
    raise TimeoutException()


# Rough approximation of Euler's number, available to eval()-ed expressions.
E = 2.718
|
| 20 |
+
|
| 21 |
+
############## Begin
|
| 22 |
+
# Numerical comparison from https://github.com/TIGER-AI-Lab/MAmmoTH/blob/main/math_eval/number_utils.py
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def run_eval(expression, output):
    """Evaluate ``expression`` and put the result (or the exception) on ``output``.

    Runs inside a child process so the parent can enforce a timeout.

    SECURITY NOTE(review): ``eval`` executes arbitrary Python; ``expression``
    must never come from untrusted input.
    """
    try:
        result = eval(expression)
    except Exception as exc:
        output.put(exc)
    else:
        output.put(result)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def eval_with_timeout(expression, timeout=5):
    """Evaluate ``expression`` in a subprocess, giving up after ``timeout`` seconds.

    Returns the evaluated value (or the exception object the child produced),
    or a descriptive string when the child times out / the result cannot be
    retrieved — callers treat any such string as a failure.
    """
    result_queue = multiprocessing.Queue()

    worker = multiprocessing.Process(target=run_eval, args=(expression, result_queue))
    worker.start()
    worker.join(timeout)

    if worker.is_alive():
        # Child is stuck: kill it and report the timeout.
        worker.terminate()
        worker.join()
        return "Timeout or error during evaluation"

    try:
        return result_queue.get_nowait()
    except Exception:
        # NOTE(review): the queue may legitimately be empty right after
        # join(); treated as an evaluation failure.
        return "Error retrieving result"
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def compare_two_list(pred, gt):
    """Order-insensitive elementwise comparison of two numeric lists."""
    if not isinstance(pred, list):
        return False
    if len(pred) != len(gt):
        return False
    if any(not isinstance(x, (int, float)) for x in pred):
        return False
    # Compare the sorted sequences pairwise with the numeric tolerance.
    return all(
        compare_two_numbers(p, g) for p, g in zip(sorted(pred), sorted(gt))
    )


def compare_two_numbers(p, gt):
    """True when ``p`` is a real number within 1% relative tolerance of ``gt``."""
    try:
        return (not math.isnan(p)) and within_eps(pred=p, gt=gt)
    except Exception:
        # Non-numeric input (e.g. a string) fails the comparison.
        return False


def within_eps(pred: float, gt: float):
    """Check ``pred`` against ``gt`` with a +/-1% (of |gt|) tolerance band."""
    eps = abs(gt) * 0.01
    return gt - eps <= pred <= gt + eps
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def clean_units(pred_str: str):
    """Normalize unit/constant notation in a numeric string.

    Converts pi to 3.14, percent to ``/100``, and strips currency and
    temperature symbols so the remainder can be parsed as an expression.
    """

    def convert_pi_to_number(code_string):
        # Canonicalize \pi to the unicode character first.
        code_string = code_string.replace("\\pi", "π")
        # Bare π (not preceded by a digit or '}') -> 3.14.
        code_string = re.sub(r"(?<![\d}])\\?π", "3.14", code_string)
        # Implicit multiplication, e.g. "3π" -> "3*3.14".
        code_string = re.sub(r"(\d)(\\?π)", r"\1*3.14", code_string)
        # Braced or explicitly multiplied forms: "{π}" and "*π".
        code_string = re.sub(r"\{(\\?π)\}", "3.14", code_string)
        code_string = re.sub(r"\*(\\?π)", "*3.14", code_string)
        return code_string

    pred_str = convert_pi_to_number(pred_str)
    replacements = (
        ("%", "/100"),
        ("$", ""),
        ("¥", ""),
        ("°C", ""),
        (" C", ""),
        ("°", ""),
    )
    for old, new in replacements:
        pred_str = pred_str.replace(old, new)
    return pred_str
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def number_it(num):
    """Coerce ``num`` into a number (or a numeric list/tuple) if possible.

    Tries, in order: already-numeric passthrough, LaTeX parsing via sympy,
    direct float conversion, and finally sandboxed evaluation of the string
    as a Python expression.  Returns None when nothing works.
    """
    if isinstance(num, (int, float)):
        return num

    num = clean_units(num)
    try:
        # Best effort: turn LaTeX (e.g. \frac{1}{2}) into a sympy-printed
        # expression string; on failure keep the cleaned string.
        num = str(parse_latex(num))
    except Exception:
        pass

    as_float = floatify(num)
    if as_float is not None:
        return as_float

    try:
        evaluated = eval_with_timeout(num)
        if isinstance(evaluated, (list, tuple)):
            return evaluated  # keep numeric collections as-is
        # floatify returns None for non-numeric results, matching the
        # original fall-through behavior.
        return floatify(evaluated)
    except Exception:
        return None
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
def floatify(num: str):
    """Convert to float; collapse integral floats to int; None on failure."""
    try:
        value = float(num)
    except Exception:
        return None
    return round(value) if value.is_integer() else value
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def remove_latex_math_brackets(latex_str):
    """Strip LaTeX math-mode delimiters while keeping their contents.

    Handles inline ``\\( ... \\)``, display ``\\[ ... \\]`` and TeX-style
    ``$ ... $`` delimiters.  If no delimiters are present, the original
    string is returned unchanged.

    Fixes over the previous revision:
      * the ``$...$`` pattern was ``r"$(.*?)$"`` where ``$`` is the
        end-of-string anchor, so it matched the empty string in *every*
        input (and the dollar signs themselves were never removed);
      * when nothing matched, the function fell through without a
        ``return`` and produced None.
    """
    inline_pattern = re.compile(r"\\\((.*?)\\\)")  # \( ... \)
    tex_inline_pattern = re.compile(r"\$(.*?)\$")  # $ ... $
    display_pattern = re.compile(r"\\\[(.*?)\\\]")  # \[ ... \]

    for pattern in (inline_pattern, tex_inline_pattern, display_pattern):
        latex_str = pattern.sub(r"\1", latex_str)
    return latex_str
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
def parse_assignment(expression):
    """Return the right-hand side after '=', '≈' or ``\\approx``, if any.

    When no assignment/approximation sign is present the whole expression
    is returned unchanged.
    """
    found = re.search(r"(?:=|≈|\\approx)\s*(.+)", expression)
    return found.group(1).strip() if found else expression
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
############## End
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
class GeneralSingleNumericalMatch:
    """Numerical comparison of a free-form response against the answer.

    Both sides are stripped of math delimiters and assignment prefixes,
    coerced to numbers (or numeric lists), then compared with a 1% relative
    tolerance; falls back to plain string matching when parsing fails.
    """

    @classmethod
    def match(cls, responses, targets) -> float:
        if not isinstance(responses, str):
            responses = str(responses)

        def _normalize(text):
            return parse_assignment(remove_latex_math_brackets(text))

        responses = _normalize(responses)
        targets = _normalize(targets)
        res = number_it(responses)
        tgt = number_it(targets)

        if res is None or tgt is None:
            # Not parseable as numbers on both sides: string comparison.
            return SimpleStrMatch.match(responses, targets)

        both_sequences = (isinstance(res, list) and isinstance(tgt, list)) or (
            isinstance(res, tuple) and isinstance(tgt, tuple)
        )
        if both_sequences:
            return float(compare_two_list(res, tgt))
        return float(compare_two_numbers(res, tgt))
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
class BoxedSingleNumericalMatch:
    """Extract the result from ``\\boxed{xxxx}`` and match it numerically.

    NOTE(review): the timeout relies on SIGALRM, which is POSIX-only and
    only usable from the main thread.
    """

    @staticmethod
    def parse_boxed_content(text):
        # Matches \boxed{...} whose body may contain one level of nested
        # braces, e.g. \boxed{\frac{5}{3}}.  Falls back to the raw text
        # when no \boxed{} is present.
        pattern = r"\\boxed\{((?:[^\{\}]+|\{[^\{\}]*\})*)\}"
        found = re.search(pattern, text)
        return found.group(1) if found else text

    @classmethod
    def match(cls, responses, targets, timeout_duration=10) -> float:
        if not isinstance(responses, str):
            responses = str(responses)
        signal.signal(signal.SIGALRM, timeout_handler)
        signal.alarm(timeout_duration)  # hard cap on parsing + comparison
        try:
            parsed_res = cls.parse_boxed_content(responses)
            targets = cls.parse_boxed_content(targets)
            return GeneralSingleNumericalMatch.match(parsed_res, targets)
        except TimeoutException:
            # Numeric path took too long: degrade to plain string match.
            return SimpleStrMatch.match(responses, targets)
        finally:
            signal.alarm(0)
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/longest_common_list_prefix_ratio.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .common.conversions import str_to_list
|
| 2 |
+
from .common.metrics import longest_common_prefix
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class LongestCommonListPrefixRatio:
    """Determines how much of the first part of the list was predicted correctly."""

    @classmethod
    def match(cls, responses, targets) -> float:
        """Return len(common prefix) / len(targets)."""
        responses = str_to_list(responses)
        targets = str_to_list(targets)
        if not targets:
            # Guard: an empty target list previously raised ZeroDivisionError.
            return 0
        return len(longest_common_prefix(responses, targets)) / len(targets)
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/mse.py
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import ast
|
| 2 |
+
import numpy as np
|
| 3 |
+
import math
|
| 4 |
+
from .common.metrics import mse
|
| 5 |
+
from .common.conversions import str_to_list
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class MSE:
    """Mean squared error between two literal-parsed sequences."""

    @staticmethod
    def match(response: str, correct_answer: str) -> int:
        """Return the MSE, or 0 when either side fails to parse."""
        try:
            parsed_response = ast.literal_eval(response)
            parsed_answer = ast.literal_eval(correct_answer)
            return mse(parsed_response, parsed_answer)
        except (SyntaxError, ValueError):
            return 0
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class NormalizedRMSE:
    """RMSE clipped to [MIN, MAX] and mapped to a score in [0, 1]."""

    MIN = 0.0
    MAX = 0.1

    @classmethod
    def match(cls, response: str, correct_answer: str) -> int:
        """Return 1 - normalized RMSE, or 0 when parsing fails."""
        try:
            mse_val = mse(
                ast.literal_eval(response), ast.literal_eval(correct_answer)
            )
            rmse_clipped = np.clip(np.sqrt(mse_val), cls.MIN, cls.MAX)
            return 1 - (rmse_clipped - cls.MIN) / (cls.MAX - cls.MIN)
        except (SyntaxError, ValueError):
            return 0
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class AngleSeqFloatRMSE:
    """Whether the sequence of numbers is close enough to the real answer."""

    MIN = 0.0
    MAX = 10.0

    @classmethod
    def match(cls, responses, targets) -> float:
        """Return 1 - normalized per-element RMSE, clipped to [0, 1]."""
        responses = str_to_list(responses)
        targets = str_to_list(targets)

        if len(responses) != len(targets):
            return 0

        try:
            res = np.array(responses)
            tgt = np.array(targets)
            rmse = np.sqrt(mse(res, tgt)).sum() / len(targets)
        except Exception:
            # Narrowed from a bare `except` so KeyboardInterrupt/SystemExit
            # propagate; any computation failure still scores 0.
            return 0

        rmse = np.clip(rmse, cls.MIN, cls.MAX)
        norm_rmse = 1 - (rmse - cls.MIN) / (cls.MAX - cls.MIN)
        return 0 if math.isnan(norm_rmse) else norm_rmse
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/multi_ref_phrase.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numbers import Number
|
| 2 |
+
from .common.conversions import str_to_iterable
|
| 3 |
+
from .simple_str_match import SimpleStrMatch
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def replace_potential_chinese_comma(input_string):
    """Normalize full-width (Chinese) commas to ASCII commas."""
    return input_string.replace("，", ",")
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class MultipleReferencePhraseEval:
    """Check the response against multiple acceptable references.

    Scores 1 as soon as any reference matches, otherwise 0.
    """

    @staticmethod
    def match(response, targets) -> Number:
        targets = replace_potential_chinese_comma(targets)
        references = str_to_iterable(list, targets)
        for reference in references:
            candidate = reference if isinstance(reference, str) else str(reference)
            if SimpleStrMatch.match(response, candidate):
                return 1
        return 0
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/nbbox_iou.py
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import ast
|
| 3 |
+
from .common.conversions import str_to_bboxes
|
| 4 |
+
from .common.metrics import calculate_iou
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class NbboxIouTuple:
    """Mean IoU across all predicted bounding boxes.

    For each predicted box the IoU with the target box of highest overlap is
    used.  Assumes coordinates normalized to [0, 1] and boxes of the form
    (x1, y1, x2, y2), where (x1, y1) is the top-left corner and (x2, y2) the
    bottom-right.
    """

    @classmethod
    def match(cls, responses, targets) -> float:
        """Return the mean best-match IoU, or 0 on any parse/compute failure."""
        logging.debug(f"{responses=}, {targets=}")
        if not isinstance(responses, (tuple | list)):
            responses = str_to_bboxes(responses)
        if not isinstance(targets, (tuple | list)):
            targets = str_to_bboxes(targets)

        try:
            iou_scores = calculate_iou(responses, targets)
        except Exception:
            # Narrowed from a bare `except`: still best-effort scoring, but
            # KeyboardInterrupt/SystemExit now propagate.
            return 0

        if not iou_scores:
            return 0

        # Take the mean IoU score for now.
        return sum(iou_scores) / len(iou_scores)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class NbboxIouSingle:
    """Single bbox IoU metric."""

    @classmethod
    def match(cls, responses, targets) -> float:
        """Return the IoU between one predicted and one target box."""
        logging.debug(f"{responses=}, {targets=}")
        targets = ast.literal_eval(targets)
        try:
            responses = ast.literal_eval(responses)
        except (SyntaxError, ValueError):
            # ValueError added: ast.literal_eval raises it for non-literal
            # strings (e.g. "abc"), which previously escaped the handler.
            return 0

        try:
            iou_scores = calculate_iou([responses], [targets])
            if not iou_scores:
                return 0
        except Exception:
            # Narrowed from a bare `except`.
            return 0

        return sum(iou_scores) / len(iou_scores)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
class NbboxIouSequence:
    """Metric for a sequence of bboxes (used for single object tracking).

    The number of predicted boxes must match the ground truth, otherwise the
    score is 0.
    """

    @classmethod
    def match(cls, responses, targets) -> float:
        """Return the average per-frame IoU."""
        if not isinstance(responses, (tuple | list)):
            if not isinstance(responses, str):
                responses = str(responses)
            responses = str_to_bboxes(responses)
        if not isinstance(targets, (tuple | list)):
            targets = str_to_bboxes(targets)

        if len(targets) != len(responses):
            return 0

        per_frame = [
            calculate_iou([res], [tgt]) for res, tgt in zip(responses, targets)
        ]
        return np.mean(per_frame)
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/near_str_match.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import rapidfuzz
|
| 2 |
+
import unidecode
|
| 3 |
+
from .common.transformations import remove_def_indef_articles
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def approximate(text: str) -> str:
    """Lowercased, ASCII-folded form of *text* with leading articles removed."""
    stripped = remove_def_indef_articles(text)
    return unidecode.unidecode(stripped).lower()
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class NearStrMatch:
    """Fuzzy string matching via normalized Damerau-Levenshtein similarity."""

    @staticmethod
    def match(response, correct_answer: str, threshold=0.9) -> int:
        """Return the similarity score (0 below ``threshold`` or on non-string input)."""
        if not (isinstance(response, str) and isinstance(correct_answer, str)):
            return 0
        return rapidfuzz.distance.DamerauLevenshtein.normalized_similarity(
            approximate(response),
            approximate(correct_answer),
            score_cutoff=threshold,
        )
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/number_rel_diff_ratio.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import ast
|
| 2 |
+
import math
|
| 3 |
+
from numbers import Number
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class NumberRelDiffRatio:
|
| 7 |
+
"""Number relative difference ratio scoring = min(0, 1 - |pred - gt| / gt)"""
|
| 8 |
+
|
| 9 |
+
@staticmethod
|
| 10 |
+
def match(response: str | Number, correct_answer: str) -> int:
|
| 11 |
+
"""Return the relative difference ratio."""
|
| 12 |
+
try:
|
| 13 |
+
if isinstance(response, Number):
|
| 14 |
+
pred = response
|
| 15 |
+
else:
|
| 16 |
+
pred = ast.literal_eval(response)
|
| 17 |
+
if not isinstance(pred, Number):
|
| 18 |
+
return 0
|
| 19 |
+
gt = ast.literal_eval(correct_answer)
|
| 20 |
+
return max(0, 1 - math.fabs((pred - gt) / gt))
|
| 21 |
+
except (SyntaxError, ValueError):
|
| 22 |
+
return 0
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/positive_int_match.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import ast
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class PositiveIntMatch:
    """Exact match on positive integers, with -1 marking 'not applicable'."""

    @staticmethod
    def match(response: str, correct_answer: str) -> int:
        """Score 1/0 on int equality; -1 when both values are non-positive.

        Robustness fixes: the ground-truth parse is now inside the
        try-block, and the former ``assert isinstance(..., int)`` (which
        crashed on malformed ground truth and disappears under ``-O``) is a
        plain check returning 0.
        """
        if not correct_answer:
            return 0

        try:
            response_obj = ast.literal_eval(response)
            correct_answer_obj = ast.literal_eval(correct_answer)
        except (SyntaxError, ValueError):
            return 0

        if not isinstance(correct_answer_obj, int) or not isinstance(response_obj, int):
            return 0

        # Non-positive fields are excluded from scoring entirely.
        if correct_answer_obj <= 0 and response_obj <= 0:
            return -1

        return 1 if response_obj == correct_answer_obj else 0
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/set_precision.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .common.conversions import cast_to_set
|
| 2 |
+
from .common.metrics import set_precision
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class SetPrecision:
    """Calculates the set precision for iterables."""

    @classmethod
    def match(cls, responses, targets) -> float:
        """Cast both sides to sets and delegate to the shared set_precision metric."""
        if responses is None:
            return 0
        return set_precision(cast_to_set(responses), cast_to_set(targets))
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/symbolic_planning.py
ADDED
|
@@ -0,0 +1,266 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
|
| 3 |
+
PARAM_LIST_MATCHER = re.compile(r"((?:\?\S+\s*)+)(?:-\s+([^\?$]+)\s*)?")
PARAM_NAME_MATCHER = re.compile(r"\?([^\s\?\)]+)\s*")


##### Parsing functions and parentheses matching
def parse_pddl_param_list(s):
    """Parse a parenthesized PDDL parameter list.

    Returns ``(head, param_type_dict)`` where ``head`` is the leading token
    (e.g. the action/predicate name) and ``param_type_dict`` maps each
    ``?param`` name to its type string, or to a list of types for
    ``(either t1 t2 ...)`` declarations.

    Fixes over the previous revision:
      * ``assert "either"`` was a no-op (a non-empty literal is always
        truthy); the check now actually validates the keyword;
      * the in-loop mutation of ``p_type`` made every parameter after the
        first in a shared ``(either ...)`` group receive the raw string
        ``"either t1 t2"`` instead of the type list.
    """
    s = s.strip()
    assert s[0] == "(" and s[-1] == ")"
    s = s[1:-1]
    param_type_dict = {}
    for params, p_type in PARAM_LIST_MATCHER.findall(s):
        p_type = p_type.strip()
        for p in PARAM_NAME_MATCHER.findall(params):
            if p_type.startswith("("):
                # (either t1 t2 ...): store the list of alternative types.
                tokens = re.split(r"\s+", p_type[1:-1].strip())
                assert tokens[0] == "either", f"expected (either ...), got {p_type!r}"
                param_type_dict[p] = tokens[1:]
            else:
                param_type_dict[p] = p_type
    return s.split("?")[0].strip(), param_type_dict
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def parse_outer_inner_str(s, str_ender, inner_starter, inner_ender):
|
| 26 |
+
inner_count = 0
|
| 27 |
+
start_id = 0
|
| 28 |
+
matched_str = []
|
| 29 |
+
for i, c in enumerate(s):
|
| 30 |
+
if inner_count == 0 and c == str_ender:
|
| 31 |
+
return s[: i + 1], matched_str, i + 1
|
| 32 |
+
elif c == inner_starter:
|
| 33 |
+
if inner_count == 0:
|
| 34 |
+
start_id = i
|
| 35 |
+
inner_count += 1
|
| 36 |
+
elif c == inner_ender:
|
| 37 |
+
inner_count -= 1
|
| 38 |
+
if inner_count == 0:
|
| 39 |
+
matched_str.append(s[start_id : i + 1])
|
| 40 |
+
return s, matched_str, len(s)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def parse_pddl_attr_from_string(
    s,
    attr_starter="(:",
    attr_ender=")",
    inner_starter="(",
    inner_ender=")",
    overlap=False,
):
    """Extract every attribute introduced by `attr_starter` from *s*.

    The return shape depends on how many occurrences exist:
      * zero -> ("", [])
      * one  -> (outer_str, inner_groups) where outer_str includes the
        starter and runs through the matching `attr_ender`
      * many -> (outer_list, {attr_name: inner_groups, ...}); the name is
        the first whitespace-delimited token after the starter

    With overlap=False each occurrence is consumed sequentially from the
    remaining text; with overlap=True every split segment is parsed
    independently (so nested occurrences are all reported).
    """
    segments = s.split(attr_starter)
    n_hits = len(segments) - 1
    if n_hits == 0:
        return "", []
    if n_hits == 1:
        outer, inner, _ = parse_outer_inner_str(
            segments[1], attr_ender, inner_starter, inner_ender
        )
        return attr_starter + outer, inner
    outers, by_name = [], {}
    if overlap:
        for seg in segments[1:]:
            name = re.split(r"\s+", seg.strip())[0]
            outer, inner, _ = parse_outer_inner_str(
                seg, attr_ender, inner_starter, inner_ender
            )
            outers.append(attr_starter + outer)
            by_name[name] = inner
    else:
        remainder = s
        while attr_starter in remainder:
            remainder = remainder.split(attr_starter, 1)[1]
            name = re.split(r"\s+", remainder.strip())[0]
            outer, inner, consumed = parse_outer_inner_str(
                remainder, attr_ender, inner_starter, inner_ender
            )
            outers.append(attr_starter + outer)
            by_name[name] = inner
            # Continue scanning after the consumed attribute.
            remainder = remainder[consumed:]
    return outers, by_name
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def remove_type_in_cnf(s):
    """Strip " - type" annotations from a PDDL clause string.

    E.g. "(at ?x - loc)" -> "(at ?x)" and
    "(on ?x - block ?y - block)" -> "(on ?x ?y)". Input without any
    " - " separator is returned unchanged.
    """
    pieces = s.split(" - ")
    if len(pieces) == 1:
        return s
    rebuilt = [pieces[0]]
    for piece in pieces[1:]:
        trimmed = piece.strip()
        # If only the type name precedes the next ")", drop it entirely;
        # otherwise drop just the first (type) token and keep the rest.
        if len(trimmed.split(")")[0].split()) == 1:
            rebuilt.append(")" + trimmed.split(")", 1)[1])
        else:
            rebuilt.append(" " + trimmed.split(" ", 1)[1])
    return "".join(rebuilt).strip()
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def split_cnf_by_parentheses(s):
    """Split a "(and c1 c2 ...)" conjunction into its top-level clauses.

    Each depth-1 clause has its type annotations stripped via
    remove_type_in_cnf; the clauses are returned as a set of strings.
    Scanning stops as soon as the outer "(and ...)" closes.
    """
    assert s.startswith("(and")
    clauses = set()
    depth = 0
    start = 0
    for idx, ch in enumerate(s):
        if ch == "(":
            depth += 1
            if depth == 2:
                # Opening of a top-level clause inside the (and ...).
                start = idx
        elif ch == ")":
            depth -= 1
            if depth == 0:
                # The outer conjunction just closed: ignore trailing text.
                break
            if depth == 1:
                clauses.add(remove_type_in_cnf(s[start : idx + 1]))
    return clauses
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
##### End of parsing functions
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
####### Domain (the env for each planning task)
|
| 119 |
+
class Domain:
    """Wraps a PDDL domain string and exposes its actions' parameters and
    ground-truth pre/post conditions.

    Attributes:
        domain_pddl: raw domain PDDL text.
        action_name / action_params / action_params_dict: parallel lists of
            action names, raw parameter strings, and {param: type} dicts.
        gt_cond_dict: "{action}_pre" / "{action}_post" -> sorted clause lists.
    """

    def __init__(self, name, domain_pddl):
        # `name` is currently unused but kept for interface compatibility.
        self.domain_pddl = domain_pddl
        self.action_name, self.action_params, self.action_params_dict = (
            self.get_domain_action()
        )
        self.gt_cond_dict = self.parse_gt_pre_post_cond()

    def get_domain_action(self):
        """Collect every "(:action ...)" block of the domain.

        Returns three parallel lists: action names, raw parameter strings,
        and parsed {param: type} dicts.
        """
        outer, matched = parse_pddl_attr_from_string(
            self.domain_pddl, attr_starter="(:action"
        )
        # parse_pddl_attr_from_string returns (str, list) when exactly one
        # "(:action" exists and ("", []) when none does; the original code
        # unconditionally called .items() and crashed in both cases.
        # Normalize to a {name: attr_groups} dict first.
        if isinstance(matched, list):
            if not matched:
                return [], [], []
            single_name = re.split(r"\s+", outer.split("(:action", 1)[1].strip())[0]
            matched = {single_name: matched}
        action_name, action_params, action_params_dict = [], [], []
        for name, action_attr in matched.items():
            # Exactly three parenthesized groups are expected per action:
            # :parameters, :precondition, :effect.
            assert len(action_attr) == 3
            param_str = action_attr[0]
            action_name.append(name)
            action_params.append(param_str)
            action_params_dict.append(parse_pddl_param_list(param_str)[1])
        return action_name, action_params, action_params_dict

    def parse_gt_pre_post_cond(self):
        """Build "{action}_pre" / "{action}_post" -> sorted clause lists.

        "(and ...)" conjunctions are split into individual clauses; a bare
        single clause is kept as-is. "(not ...)" clauses sort first so that
        deletions are applied before additions during state transition.
        """
        cond_dict = {}
        for a in self.action_name:
            act_str = self.domain_pddl.split(f"(:action {a}")[1]
            for postfix in ["pre", "post"]:
                split_tag = ":precondition" if postfix == "pre" else ":effect"
                cond_str = act_str.split(split_tag)[1].strip()
                if cond_str.startswith("(and"):
                    cond_dict[f"{a}_{postfix}"] = split_cnf_by_parentheses(cond_str)
                else:
                    # Single clause: keep up to its first closing paren.
                    cond_dict[f"{a}_{postfix}"] = {cond_str.split(")")[0].strip() + ")"}
                cond_dict[f"{a}_{postfix}"] = sorted(
                    list(cond_dict[f"{a}_{postfix}"]),
                    key=lambda x: 0 if x.startswith("(not ") else 1,
                )
        return cond_dict
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
##### Transition functions
|
| 164 |
+
def construct_param_to_obj(domain, action):
    """Bind an action's formal parameters to the objects of a ground action.

    Args:
        domain: object exposing `action_name` (list of names) and
            `action_params_dict` (parallel list of {param: type} dicts).
        action: ground action string such as "(move a b)".

    Returns:
        ({param: obj}, action_name).

    Raises:
        ValueError: if the action name is unknown (list.index).
        AssertionError: if the object count does not match the arity.
    """
    # split() tolerates runs of whitespace; the original split(" ") produced
    # empty tokens on e.g. "(move  a b)" and broke the arity assertion.
    tokens = action[1:-1].split()
    a_name = tokens[0]
    objs = tokens[1:]
    a_index = domain.action_name.index(a_name)
    params = domain.action_params_dict[a_index]
    assert len(objs) == len(params)
    return dict(zip(params, objs)), a_name
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
def state_transition(current_state, effects, param_to_obj):
    """Apply an action's grounded effects to `current_state` (in place).

    Each effect clause has its "?param" placeholders replaced by the bound
    objects. "(not X)" effects delete X from the state if present; other
    effects add the clause if not already present. Returns the mutated list.
    """
    for obj_cond in effects:
        for param, obj in param_to_obj.items():
            # re.escape guards against parameter names containing regex
            # metacharacters; the lookahead stops "?x" from also matching
            # inside "?xy".
            obj_cond = re.sub(
                r"\?{}(?=[^\w-])".format(re.escape(param)), obj, obj_cond
            )
        _, reversed_cond = parse_pddl_attr_from_string(obj_cond, attr_starter="(not ")
        if reversed_cond:
            # Delete effect: remove the inner positive fact if present.
            assert len(reversed_cond) == 1
            if reversed_cond[0] in current_state:
                current_state.remove(reversed_cond[0])
        elif obj_cond.strip() not in current_state:
            # Add effect. Store the stripped form so the membership test
            # above and the stored value agree (the original appended the
            # unstripped string, which could create whitespace-variant
            # duplicates in the state).
            current_state.append(obj_cond.strip())
    return current_state
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
def check_pre_conds_satisfy(current_state, pre_conds, param_to_obj):
    """Return True iff every grounded precondition holds in `current_state`.

    The state is a collection of positive fact strings. A positive
    precondition holds iff its grounded string is in the state; a
    "(not X)" precondition holds iff the grounded X is absent. The
    original compared the literal "(not X)" string against the state
    (which stores positive facts), so negative preconditions were never
    actually enforced.
    """
    for obj_cond in pre_conds:
        for param, obj in param_to_obj.items():
            # re.escape guards against regex metacharacters in parameter
            # names; the lookahead stops "?x" from matching inside "?xy".
            obj_cond = re.sub(
                r"\?{}(?=[^\w-])".format(re.escape(param)), obj, obj_cond
            )
        if obj_cond.startswith("(not "):
            # Negative precondition: the inner positive fact must be absent.
            inner = obj_cond[len("(not ") : -1].strip()
            if inner in current_state:
                return False
        elif obj_cond not in current_state:
            return False
    return True
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
##### End of transition functions
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
class SymbolicPlanningMetricTest:
|
| 206 |
+
"""An example metric for symbolic planning tasks"""
|
| 207 |
+
|
| 208 |
+
@classmethod
|
| 209 |
+
def match(cls, response, eval_context, task_info=None):
|
| 210 |
+
## Initialize domain
|
| 211 |
+
# task_name = task_info["task_name"]
|
| 212 |
+
domain_pddl = eval_context["domain_pddl"]
|
| 213 |
+
domain = Domain(" ", domain_pddl)
|
| 214 |
+
|
| 215 |
+
## Parse trajectory, setup initial and goal state
|
| 216 |
+
# response = eval_context["gt_plan"] # for debug
|
| 217 |
+
match response:
|
| 218 |
+
case str():
|
| 219 |
+
candidates = response.split("\n")
|
| 220 |
+
case tuple() | list():
|
| 221 |
+
candidates = list(response)
|
| 222 |
+
case _:
|
| 223 |
+
raise ValueError(
|
| 224 |
+
f"`response` has unsupported type: {type(response)=}, {response=}"
|
| 225 |
+
)
|
| 226 |
+
cand_traj = [cand_a.strip() for cand_a in candidates if cand_a.startswith("(")]
|
| 227 |
+
try:
|
| 228 |
+
task_pddl = eval_context["task_pddl"]
|
| 229 |
+
cur_state = parse_pddl_attr_from_string(task_pddl, attr_starter="(:init")[1]
|
| 230 |
+
goal_state = parse_pddl_attr_from_string(task_pddl, attr_starter="(and")[1]
|
| 231 |
+
except IndexError:
|
| 232 |
+
score = 0
|
| 233 |
+
return score
|
| 234 |
+
|
| 235 |
+
score = 1
|
| 236 |
+
try:
|
| 237 |
+
## State transitions and check if satisfy the preconditions
|
| 238 |
+
for cand_a in cand_traj:
|
| 239 |
+
param_to_obj, a_name = construct_param_to_obj(domain, cand_a)
|
| 240 |
+
if not check_pre_conds_satisfy(
|
| 241 |
+
cur_state, domain.gt_cond_dict[f"{a_name}_pre"], param_to_obj
|
| 242 |
+
):
|
| 243 |
+
print(f"precondition of the action {cand_a} is not satisfied!")
|
| 244 |
+
score = 0
|
| 245 |
+
break
|
| 246 |
+
cur_state = state_transition(
|
| 247 |
+
cur_state, domain.gt_cond_dict[f"{a_name}_post"], param_to_obj
|
| 248 |
+
)
|
| 249 |
+
|
| 250 |
+
## Check if goal conditions are reached in the final state
|
| 251 |
+
if score == 1:
|
| 252 |
+
for g_state in goal_state:
|
| 253 |
+
if (g_state.startswith("(not ") and g_state in cur_state) or (
|
| 254 |
+
not g_state.startswith("(not ") and g_state not in cur_state
|
| 255 |
+
):
|
| 256 |
+
print(f"goal state {g_state} is not reached!")
|
| 257 |
+
score = 0
|
| 258 |
+
break
|
| 259 |
+
except ValueError:
|
| 260 |
+
# grammar error in execution
|
| 261 |
+
score = 0
|
| 262 |
+
except AssertionError:
|
| 263 |
+
# assertion error in functions
|
| 264 |
+
score = 0
|
| 265 |
+
|
| 266 |
+
return score
|