Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- Shapegrid/ShapeGrid_count.tsv +0 -0
- Shapegrid/ShapeGrid_dis.tsv +0 -0
- VLMEvalKit-sudoku/.gitignore +212 -0
- VLMEvalKit-sudoku/docs/zh-CN/cp_origin_docs.sh +9 -0
- VLMEvalKit-sudoku/llava/eval/eval_textvqa.py +65 -0
- VLMEvalKit-sudoku/run.py +500 -0
- VLMEvalKit-sudoku/vlmeval/dataset/CGAVCounting/__pycache__/__init__.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/CGAVCounting/__pycache__/cg_av_counting.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/CGAVCounting/cg_av_counting.py +405 -0
- VLMEvalKit-sudoku/vlmeval/dataset/CGAVCounting/utils.py +422 -0
- VLMEvalKit-sudoku/vlmeval/dataset/EgoExoBench/README.md +79 -0
- VLMEvalKit-sudoku/vlmeval/dataset/EgoExoBench/__pycache__/utils.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/EgoExoBench/cvmhat_preprocess.py +45 -0
- VLMEvalKit-sudoku/vlmeval/dataset/EgoExoBench/tf2023_preprocess.py +71 -0
- VLMEvalKit-sudoku/vlmeval/dataset/EgoExoBench/utils.py +771 -0
- VLMEvalKit-sudoku/vlmeval/dataset/GUI/__init__.py +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/GUI/__pycache__/__init__.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/GUI/__pycache__/screenspot_pro.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/GUI/__pycache__/screenspot_v2.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/GUI/screenspot.py +461 -0
- VLMEvalKit-sudoku/vlmeval/dataset/GUI/screenspot_pro.py +460 -0
- VLMEvalKit-sudoku/vlmeval/dataset/OmniDocBench/__pycache__/__init__.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/OmniDocBench/data_preprocess.py +447 -0
- VLMEvalKit-sudoku/vlmeval/dataset/OmniDocBench/requirements.txt +13 -0
- VLMEvalKit-sudoku/vlmeval/dataset/OmniDocBench/utils.py +1916 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/Ocrbench_v2/IoUscore_metric.py +87 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/Ocrbench_v2/TEDS_metric.py +930 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/Ocrbench_v2/spotting_eval/__pycache__/rrc_evaluation_funcs_1_1.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/Ocrbench_v2/spotting_eval/__pycache__/script.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/Ocrbench_v2/spotting_eval/rrc_evaluation_funcs_1_1.py +456 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/__init__.py +11 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/ccocr_evaluator/ocr_evaluator.py +106 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/mathvista.py +164 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/dict_nbbox_iou_tuple_agg_jaccard.py +27 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/exact_str_match.py +48 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/normalized_similarity_damerau_levenshtein.py +14 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/mmif/__init__.py +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/mmif/__pycache__/function_and_compare.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/mmif/function_and_compare.py +429 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/tempcompass.py +254 -0
- VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/eagle_x.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/falcon_vlm.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/llava_uhd_siglip2.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/parrot.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/visualglm.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/vlaa_thinker.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/wethink_vl.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/vlm/hawk_vl/__init__.py +2 -0
- VLMEvalKit-sudoku/vlmeval/vlm/hawk_vl/__pycache__/__init__.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/vlm/hawk_vl/__pycache__/model.cpython-310.pyc +0 -0
Shapegrid/ShapeGrid_count.tsv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Shapegrid/ShapeGrid_dis.tsv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
VLMEvalKit-sudoku/.gitignore
ADDED
|
@@ -0,0 +1,212 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.idea/
|
| 2 |
+
|
| 3 |
+
# Byte-compiled / optimized / DLL files
|
| 4 |
+
__pycache__/
|
| 5 |
+
*.py[cod]
|
| 6 |
+
*$py.class
|
| 7 |
+
|
| 8 |
+
# C extensions
|
| 9 |
+
*.so
|
| 10 |
+
|
| 11 |
+
# Distribution / packaging
|
| 12 |
+
.Python
|
| 13 |
+
build/
|
| 14 |
+
develop-eggs/
|
| 15 |
+
dist/
|
| 16 |
+
downloads/
|
| 17 |
+
eggs/
|
| 18 |
+
.eggs/
|
| 19 |
+
lib/
|
| 20 |
+
lib64/
|
| 21 |
+
parts/
|
| 22 |
+
sdist/
|
| 23 |
+
var/
|
| 24 |
+
wheels/
|
| 25 |
+
share/python-wheels/
|
| 26 |
+
*.egg-info/
|
| 27 |
+
.installed.cfg
|
| 28 |
+
*.egg
|
| 29 |
+
MANIFEST
|
| 30 |
+
.vscode/
|
| 31 |
+
.gradio/
|
| 32 |
+
|
| 33 |
+
# PyInstaller
|
| 34 |
+
# Usually these files are written by a python script from a template
|
| 35 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 36 |
+
*.manifest
|
| 37 |
+
*.spec
|
| 38 |
+
|
| 39 |
+
# Installer logs
|
| 40 |
+
pip-log.txt
|
| 41 |
+
pip-delete-this-directory.txt
|
| 42 |
+
|
| 43 |
+
# Unit test / coverage reports
|
| 44 |
+
htmlcov/
|
| 45 |
+
.tox/
|
| 46 |
+
.nox/
|
| 47 |
+
.coverage
|
| 48 |
+
.coverage.*
|
| 49 |
+
.cache
|
| 50 |
+
nosetests.xml
|
| 51 |
+
coverage.xml
|
| 52 |
+
*.cover
|
| 53 |
+
*.py,cover
|
| 54 |
+
.hypothesis/
|
| 55 |
+
.pytest_cache/
|
| 56 |
+
cover/
|
| 57 |
+
|
| 58 |
+
# Translations
|
| 59 |
+
*.mo
|
| 60 |
+
*.pot
|
| 61 |
+
|
| 62 |
+
# Django stuff:
|
| 63 |
+
*.log
|
| 64 |
+
local_settings.py
|
| 65 |
+
db.sqlite3
|
| 66 |
+
db.sqlite3-journal
|
| 67 |
+
|
| 68 |
+
# Flask stuff:
|
| 69 |
+
instance/
|
| 70 |
+
.webassets-cache
|
| 71 |
+
|
| 72 |
+
# Scrapy stuff:
|
| 73 |
+
.scrapy
|
| 74 |
+
|
| 75 |
+
# Sphinx documentation
|
| 76 |
+
docs/_build/
|
| 77 |
+
|
| 78 |
+
# PyBuilder
|
| 79 |
+
.pybuilder/
|
| 80 |
+
target/
|
| 81 |
+
|
| 82 |
+
# Jupyter Notebook
|
| 83 |
+
.ipynb_checkpoints
|
| 84 |
+
|
| 85 |
+
# IPython
|
| 86 |
+
profile_default/
|
| 87 |
+
ipython_config.py
|
| 88 |
+
|
| 89 |
+
# pyenv
|
| 90 |
+
# For a library or package, you might want to ignore these files since the code is
|
| 91 |
+
# intended to run in multiple environments; otherwise, check them in:
|
| 92 |
+
# .python-version
|
| 93 |
+
|
| 94 |
+
# pipenv
|
| 95 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
| 96 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
| 97 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
| 98 |
+
# install all needed dependencies.
|
| 99 |
+
#Pipfile.lock
|
| 100 |
+
|
| 101 |
+
# poetry
|
| 102 |
+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
| 103 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
| 104 |
+
# commonly ignored for libraries.
|
| 105 |
+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
| 106 |
+
#poetry.lock
|
| 107 |
+
|
| 108 |
+
# pdm
|
| 109 |
+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
| 110 |
+
#pdm.lock
|
| 111 |
+
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
| 112 |
+
# in version control.
|
| 113 |
+
# https://pdm.fming.dev/#use-with-ide
|
| 114 |
+
.pdm.toml
|
| 115 |
+
|
| 116 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
| 117 |
+
__pypackages__/
|
| 118 |
+
|
| 119 |
+
# Celery stuff
|
| 120 |
+
celerybeat-schedule
|
| 121 |
+
celerybeat.pid
|
| 122 |
+
|
| 123 |
+
# SageMath parsed files
|
| 124 |
+
*.sage.py
|
| 125 |
+
|
| 126 |
+
# Environments
|
| 127 |
+
.env
|
| 128 |
+
.venv
|
| 129 |
+
env/
|
| 130 |
+
venv/
|
| 131 |
+
ENV/
|
| 132 |
+
env.bak/
|
| 133 |
+
venv.bak/
|
| 134 |
+
environment.yml
|
| 135 |
+
|
| 136 |
+
# Spyder project settings
|
| 137 |
+
.spyderproject
|
| 138 |
+
.spyproject
|
| 139 |
+
|
| 140 |
+
# Rope project settings
|
| 141 |
+
.ropeproject
|
| 142 |
+
|
| 143 |
+
# mkdocs documentation
|
| 144 |
+
/site
|
| 145 |
+
|
| 146 |
+
# mypy
|
| 147 |
+
.mypy_cache/
|
| 148 |
+
.dmypy.json
|
| 149 |
+
dmypy.json
|
| 150 |
+
|
| 151 |
+
# Pyre type checker
|
| 152 |
+
.pyre/
|
| 153 |
+
|
| 154 |
+
# pytype static type analyzer
|
| 155 |
+
.pytype/
|
| 156 |
+
|
| 157 |
+
# Cython debug symbols
|
| 158 |
+
cython_debug/
|
| 159 |
+
|
| 160 |
+
# Images
|
| 161 |
+
images/
|
| 162 |
+
|
| 163 |
+
scripts/*ttf
|
| 164 |
+
.history
|
| 165 |
+
cache_dir/*
|
| 166 |
+
|
| 167 |
+
# Evaluation Outputs
|
| 168 |
+
outputs/*
|
| 169 |
+
demo.ipynb
|
| 170 |
+
*json
|
| 171 |
+
!vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/**/*.json
|
| 172 |
+
.vscode
|
| 173 |
+
*.swp
|
| 174 |
+
GPT4o_MINI/
|
| 175 |
+
|
| 176 |
+
2weiyun*
|
| 177 |
+
script.py
|
| 178 |
+
Gemini*
|
| 179 |
+
Claude3-5V*
|
| 180 |
+
GLM4V*
|
| 181 |
+
GPT4o*
|
| 182 |
+
GPT4V*
|
| 183 |
+
mmmu_debug
|
| 184 |
+
bailingMM
|
| 185 |
+
BailingMM*
|
| 186 |
+
SenseChat*
|
| 187 |
+
Step*
|
| 188 |
+
DoubaoVL
|
| 189 |
+
arch
|
| 190 |
+
BlueLM*
|
| 191 |
+
mmb_*
|
| 192 |
+
gpt-4.1*
|
| 193 |
+
Reka*
|
| 194 |
+
Taiyi
|
| 195 |
+
TeleMM
|
| 196 |
+
apple.jpg
|
| 197 |
+
assets/LOGO.png
|
| 198 |
+
api_list.txt
|
| 199 |
+
vlmeval/gemini_tmp.py
|
| 200 |
+
run.sh
|
| 201 |
+
run_g.sh
|
| 202 |
+
tmp/
|
| 203 |
+
InternVL*
|
| 204 |
+
Qwen*
|
| 205 |
+
CongRong*
|
| 206 |
+
Seed1.5*
|
| 207 |
+
aguvis*
|
| 208 |
+
grok-*
|
| 209 |
+
GLM4.5*
|
| 210 |
+
SenseNova*
|
| 211 |
+
|
| 212 |
+
.DS_Store
|
VLMEvalKit-sudoku/docs/zh-CN/cp_origin_docs.sh
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
|
| 2 |
+
|
| 3 |
+
# Copy *.md files from docs/ if it doesn't have a Chinese translation
|
| 4 |
+
|
| 5 |
+
for filename in $(find ../en/ -name '*.md' -printf "%P\n");
|
| 6 |
+
do
|
| 7 |
+
mkdir -p $(dirname $filename)
|
| 8 |
+
cp -n ../en/$filename ./$filename
|
| 9 |
+
done
|
VLMEvalKit-sudoku/llava/eval/eval_textvqa.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import argparse
|
| 3 |
+
import json
|
| 4 |
+
import re
|
| 5 |
+
|
| 6 |
+
from llava.eval.m4c_evaluator import TextVQAAccuracyEvaluator
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def get_args():
|
| 10 |
+
parser = argparse.ArgumentParser()
|
| 11 |
+
parser.add_argument('--annotation-file', type=str)
|
| 12 |
+
parser.add_argument('--result-file', type=str)
|
| 13 |
+
parser.add_argument('--result-dir', type=str)
|
| 14 |
+
return parser.parse_args()
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def prompt_processor(prompt):
|
| 18 |
+
if prompt.startswith('OCR tokens: '):
|
| 19 |
+
pattern = r"Question: (.*?) Short answer:"
|
| 20 |
+
match = re.search(pattern, prompt, re.DOTALL)
|
| 21 |
+
question = match.group(1)
|
| 22 |
+
elif 'Reference OCR token: ' in prompt and len(prompt.split('\n')) == 3:
|
| 23 |
+
if prompt.startswith('Reference OCR token:'):
|
| 24 |
+
question = prompt.split('\n')[1]
|
| 25 |
+
else:
|
| 26 |
+
question = prompt.split('\n')[0]
|
| 27 |
+
elif len(prompt.split('\n')) == 2:
|
| 28 |
+
question = prompt.split('\n')[0]
|
| 29 |
+
else:
|
| 30 |
+
assert False
|
| 31 |
+
|
| 32 |
+
return question.lower()
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def eval_single(annotation_file, result_file):
|
| 36 |
+
experiment_name = os.path.splitext(os.path.basename(result_file))[0]
|
| 37 |
+
print(experiment_name)
|
| 38 |
+
annotations = json.load(open(annotation_file))['data']
|
| 39 |
+
annotations = {(annotation['image_id'], annotation['question'].lower()): annotation for annotation in annotations}
|
| 40 |
+
results = [json.loads(line) for line in open(result_file)]
|
| 41 |
+
|
| 42 |
+
pred_list = []
|
| 43 |
+
for result in results:
|
| 44 |
+
annotation = annotations[(result['question_id'], prompt_processor(result['prompt']))]
|
| 45 |
+
pred_list.append({
|
| 46 |
+
"pred_answer": result['text'],
|
| 47 |
+
"gt_answers": annotation['answers'],
|
| 48 |
+
})
|
| 49 |
+
|
| 50 |
+
evaluator = TextVQAAccuracyEvaluator()
|
| 51 |
+
print('Samples: {}\nAccuracy: {:.2f}%\n'.format(len(pred_list), 100. * evaluator.eval_pred_list(pred_list)))
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
if __name__ == "__main__":
|
| 55 |
+
args = get_args()
|
| 56 |
+
|
| 57 |
+
if args.result_file is not None:
|
| 58 |
+
eval_single(args.annotation_file, args.result_file)
|
| 59 |
+
|
| 60 |
+
if args.result_dir is not None:
|
| 61 |
+
for result_file in sorted(os.listdir(args.result_dir)):
|
| 62 |
+
if not result_file.endswith('.jsonl'):
|
| 63 |
+
print(f'Skipping {result_file}')
|
| 64 |
+
continue
|
| 65 |
+
eval_single(args.annotation_file, os.path.join(args.result_dir, result_file))
|
VLMEvalKit-sudoku/run.py
ADDED
|
@@ -0,0 +1,500 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
import subprocess
|
| 4 |
+
from functools import partial
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
# GET the number of GPUs on the node without importing libs like torch
|
| 8 |
+
def get_gpu_list():
|
| 9 |
+
CUDA_VISIBLE_DEVICES = os.environ.get('CUDA_VISIBLE_DEVICES', '')
|
| 10 |
+
if CUDA_VISIBLE_DEVICES != '':
|
| 11 |
+
gpu_list = [int(x) for x in CUDA_VISIBLE_DEVICES.split(',')]
|
| 12 |
+
return gpu_list
|
| 13 |
+
try:
|
| 14 |
+
ps = subprocess.Popen(('nvidia-smi', '--list-gpus'), stdout=subprocess.PIPE)
|
| 15 |
+
output = subprocess.check_output(('wc', '-l'), stdin=ps.stdout)
|
| 16 |
+
return list(range(int(output)))
|
| 17 |
+
except:
|
| 18 |
+
return []
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
RANK = int(os.environ.get('RANK', 0))
|
| 22 |
+
WORLD_SIZE = int(os.environ.get('WORLD_SIZE', 1))
|
| 23 |
+
LOCAL_WORLD_SIZE = int(os.environ.get("LOCAL_WORLD_SIZE",1))
|
| 24 |
+
LOCAL_RANK = int(os.environ.get("LOCAL_RANK",1))
|
| 25 |
+
|
| 26 |
+
GPU_LIST = get_gpu_list()
|
| 27 |
+
if LOCAL_WORLD_SIZE > 1 and len(GPU_LIST):
|
| 28 |
+
NGPU = len(GPU_LIST)
|
| 29 |
+
assert NGPU >= LOCAL_WORLD_SIZE, "The number of processes should be less than or equal to the number of GPUs"
|
| 30 |
+
GPU_PER_PROC = NGPU // LOCAL_WORLD_SIZE
|
| 31 |
+
DEVICE_START_IDX = GPU_PER_PROC * LOCAL_RANK
|
| 32 |
+
CUDA_VISIBLE_DEVICES = [str(i) for i in GPU_LIST[DEVICE_START_IDX: DEVICE_START_IDX + GPU_PER_PROC]]
|
| 33 |
+
CUDA_VISIBLE_DEVICES = ','.join(CUDA_VISIBLE_DEVICES)
|
| 34 |
+
# Set CUDA_VISIBLE_DEVICES
|
| 35 |
+
os.environ['CUDA_VISIBLE_DEVICES'] = CUDA_VISIBLE_DEVICES
|
| 36 |
+
print(
|
| 37 |
+
f'RANK: {RANK}, LOCAL_RANK: {LOCAL_RANK}, WORLD_SIZE: {WORLD_SIZE},'
|
| 38 |
+
f'LOCAL_WORLD_SIZE: {LOCAL_WORLD_SIZE}, CUDA_VISIBLE_DEVICES: {CUDA_VISIBLE_DEVICES}'
|
| 39 |
+
)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
from vlmeval.config import supported_VLM
|
| 43 |
+
from vlmeval.dataset.video_dataset_config import supported_video_datasets
|
| 44 |
+
from vlmeval.dataset import build_dataset
|
| 45 |
+
from vlmeval.inference import infer_data_job
|
| 46 |
+
from vlmeval.inference_video import infer_data_job_video
|
| 47 |
+
from vlmeval.inference_mt import infer_data_job_mt
|
| 48 |
+
from vlmeval.smp import *
|
| 49 |
+
from vlmeval.utils.result_transfer import MMMU_result_transfer, MMTBench_result_transfer
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
# Make WORLD_SIZE invisible when build models
|
| 53 |
+
def build_model_from_config(cfg, model_name, use_vllm=False):
|
| 54 |
+
import vlmeval.api
|
| 55 |
+
import vlmeval.vlm
|
| 56 |
+
ws_bak = os.environ.pop('WORLD_SIZE', None)
|
| 57 |
+
|
| 58 |
+
config = cp.deepcopy(cfg[model_name])
|
| 59 |
+
if use_vllm:
|
| 60 |
+
config['use_vllm'] = use_vllm
|
| 61 |
+
if 'class' not in config:
|
| 62 |
+
return supported_VLM[model_name](**config)
|
| 63 |
+
cls_name = config.pop('class')
|
| 64 |
+
if hasattr(vlmeval.api, cls_name):
|
| 65 |
+
model = getattr(vlmeval.api, cls_name)(**config)
|
| 66 |
+
elif hasattr(vlmeval.vlm, cls_name):
|
| 67 |
+
model = getattr(vlmeval.vlm, cls_name)(**config)
|
| 68 |
+
else:
|
| 69 |
+
raise ValueError(f'Class {cls_name} is not supported in `vlmeval.api` or `vlmeval.vlm`')
|
| 70 |
+
|
| 71 |
+
if ws_bak:
|
| 72 |
+
os.environ['WORLD_SIZE'] = ws_bak
|
| 73 |
+
return model
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def build_dataset_from_config(cfg, dataset_name):
|
| 77 |
+
import vlmeval.dataset
|
| 78 |
+
import inspect
|
| 79 |
+
config = cp.deepcopy(cfg[dataset_name])
|
| 80 |
+
if config == {}:
|
| 81 |
+
return supported_video_datasets[dataset_name]()
|
| 82 |
+
assert 'class' in config
|
| 83 |
+
cls_name = config.pop('class')
|
| 84 |
+
if hasattr(vlmeval.dataset, cls_name):
|
| 85 |
+
cls = getattr(vlmeval.dataset, cls_name)
|
| 86 |
+
sig = inspect.signature(cls.__init__)
|
| 87 |
+
valid_params = {k: v for k, v in config.items() if k in sig.parameters}
|
| 88 |
+
if cls.MODALITY == 'VIDEO':
|
| 89 |
+
if valid_params.get('fps', 0) > 0 and valid_params.get('nframe', 0) > 0:
|
| 90 |
+
raise ValueError('fps and nframe should not be set at the same time')
|
| 91 |
+
if valid_params.get('fps', 0) <= 0 and valid_params.get('nframe', 0) <= 0:
|
| 92 |
+
raise ValueError('fps and nframe should be set at least one valid value')
|
| 93 |
+
return cls(**valid_params)
|
| 94 |
+
else:
|
| 95 |
+
raise ValueError(f'Class {cls_name} is not supported in `vlmeval.dataset`')
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def parse_args():
|
| 99 |
+
help_msg = """\
|
| 100 |
+
You can launch the evaluation by setting either --data and --model or --config.
|
| 101 |
+
|
| 102 |
+
--data and --model:
|
| 103 |
+
Each Arg should be a list of strings, specifying the names of datasets and models.
|
| 104 |
+
To find all supported model names, please refer to the `vlmeval/config.py` of check the output of the command \
|
| 105 |
+
`vlmutil mlist all` in the terminal (you should first have vlmeval installed).
|
| 106 |
+
To find all supported dataset names, please refer to the `vlmeval/dataset/__init__.py` file. The python script \
|
| 107 |
+
to print all supported dataset names is as follows:
|
| 108 |
+
```python
|
| 109 |
+
from vlmeval.dataset import SUPPORTED_DATASETS
|
| 110 |
+
print(SUPPORTED_DATASETS)
|
| 111 |
+
```
|
| 112 |
+
or you can check the output of the command `vlmutil dlist all` in the terminal.
|
| 113 |
+
To find all supported video dataset default settings, please refer to the \
|
| 114 |
+
`vlmeval/dataset/video_dataset_config.py` file.
|
| 115 |
+
|
| 116 |
+
--config:
|
| 117 |
+
Launch the evaluation by specifying the path to the config json file. Sample Json Content:
|
| 118 |
+
```json
|
| 119 |
+
{
|
| 120 |
+
"model": {
|
| 121 |
+
"GPT4o_20240806_T00_HIGH": {
|
| 122 |
+
"class": "GPT4V",
|
| 123 |
+
"model": "gpt-4o-2024-08-06",
|
| 124 |
+
"temperature": 0,
|
| 125 |
+
"img_detail": "high"
|
| 126 |
+
},
|
| 127 |
+
"GPT4o_20240806_T10_Low": {
|
| 128 |
+
"class": "GPT4V",
|
| 129 |
+
"model": "gpt-4o-2024-08-06",
|
| 130 |
+
"temperature": 1.0,
|
| 131 |
+
"img_detail": "low"
|
| 132 |
+
},
|
| 133 |
+
"GPT4o_20241120": {}
|
| 134 |
+
},
|
| 135 |
+
"data": {
|
| 136 |
+
"MME-RealWorld-Lite": {
|
| 137 |
+
"class": "MMERealWorld",
|
| 138 |
+
"dataset": "MME-RealWorld-Lite"
|
| 139 |
+
},
|
| 140 |
+
"MMBench_DEV_EN_V11": {
|
| 141 |
+
"class": "ImageMCQDataset",
|
| 142 |
+
"dataset": "MMBench_DEV_EN_V11"
|
| 143 |
+
},
|
| 144 |
+
"MMBench_Video_8frame_nopack": {},
|
| 145 |
+
"Video-MME_16frame_subs": {
|
| 146 |
+
"class": "VideoMME",
|
| 147 |
+
"dataset": "Video-MME",
|
| 148 |
+
"nframe": 16,
|
| 149 |
+
"use_subtitle": true,
|
| 150 |
+
}
|
| 151 |
+
}
|
| 152 |
+
}
|
| 153 |
+
```
|
| 154 |
+
Currently, only `model` and `data` are supported fields. The content of each field is a dictionary.
|
| 155 |
+
For `model`, the key is the name of the model, and the value is a dictionary containing the following keys:
|
| 156 |
+
- `class`: The class name of the model, which should be a class in `vlmeval.vlm` or `vlmeval.api`.
|
| 157 |
+
- Other keys are specific to the model, please refer to the corresponding class.
|
| 158 |
+
- Tip: The defined model in the `supported_VLM` of `vlmeval/config.py` can be used as a shortcut.
|
| 159 |
+
For `data`, the key is the name of the dataset (should be the same as the `dataset` field in most cases, \
|
| 160 |
+
except for video datasets), and the value is a dictionary containing the following keys:
|
| 161 |
+
- `class`: The class name of the dataset, which should be a class in `vlmeval.dataset`.
|
| 162 |
+
- `dataset`: The name of the dataset, which should be a string that is accepted by the `dataset` argument of the \
|
| 163 |
+
corresponding class.
|
| 164 |
+
- Other keys are specific to the dataset, please refer to the corresponding class.
|
| 165 |
+
- Tip: The defined dataset in the `supported_video_datasets` of `vlmeval/dataset/video_dataset_config.py` \
|
| 166 |
+
can be used as a shortcut.
|
| 167 |
+
|
| 168 |
+
The keys in the `model` and `data` fields will be used for naming the prediction files and evaluation results.
|
| 169 |
+
When launching with `--config`, args for API VLMs, such as `--retry`, `--verbose`, will be ignored.
|
| 170 |
+
"""
|
| 171 |
+
parser = argparse.ArgumentParser(description=help_msg, formatter_class=argparse.RawTextHelpFormatter)
|
| 172 |
+
# Essential Args, Setting the Names of Datasets and Models
|
| 173 |
+
parser.add_argument('--data', type=str, nargs='+', help='Names of Datasets')
|
| 174 |
+
parser.add_argument('--model', type=str, nargs='+', help='Names of Models')
|
| 175 |
+
parser.add_argument('--config', type=str, help='Path to the Config Json File')
|
| 176 |
+
# Work Dir
|
| 177 |
+
parser.add_argument('--work-dir', type=str, default='./outputs', help='select the output directory')
|
| 178 |
+
# Infer + Eval or Infer Only
|
| 179 |
+
parser.add_argument('--mode', type=str, default='all', choices=['all', 'infer', 'eval'])
|
| 180 |
+
# API Kwargs, Apply to API VLMs and Judge API LLMs
|
| 181 |
+
parser.add_argument('--api-nproc', type=int, default=4, help='Parallel API calling')
|
| 182 |
+
parser.add_argument('--retry', type=int, default=None, help='retry numbers for API VLMs')
|
| 183 |
+
parser.add_argument('--judge-args', type=str, default=None, help='Judge arguments in JSON format')
|
| 184 |
+
# Explicitly Set the Judge Model
|
| 185 |
+
parser.add_argument('--judge', type=str, default=None)
|
| 186 |
+
# Logging Utils
|
| 187 |
+
parser.add_argument('--verbose', action='store_true')
|
| 188 |
+
# Configuration for Resume
|
| 189 |
+
# Ignore: will not rerun failed VLM inference
|
| 190 |
+
parser.add_argument('--ignore', action='store_true', help='Ignore failed indices. ')
|
| 191 |
+
# Reuse: will reuse the existing prediction files
|
| 192 |
+
parser.add_argument('--reuse', action='store_true')
|
| 193 |
+
# Reuse-aux: if set, when reuse is True, will also reuse the auxiliary evaluation files
|
| 194 |
+
parser.add_argument('--reuse-aux', type=int, default=True, help='reuse auxiliary evaluation files')
|
| 195 |
+
parser.add_argument(
|
| 196 |
+
'--use-vllm', action='store_true', help='use vllm to generate, the flag is only supported in Llama4 for now')
|
| 197 |
+
parser.add_argument('--use-verifier', action='store_true', help='use verifier to evaluate')
|
| 198 |
+
|
| 199 |
+
args = parser.parse_args()
|
| 200 |
+
return args
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
def main():
    """Run inference and evaluation for every requested model x dataset pair.

    Model/dataset lists come either from a ``--config`` file (exclusive with
    explicit lists) or from ``--model``/``--data``. Under torch.distributed
    (``WORLD_SIZE > 1``) inference runs on every rank while evaluation and
    prediction-file bookkeeping happen on RANK 0 only. Failures of a single
    (model, dataset) combination are logged and skipped.
    """
    logger = get_logger('RUN')
    args = parse_args()
    use_config, cfg = False, None
    if args.config is not None:
        assert args.data is None and args.model is None, '--data and --model should not be set when using --config'
        use_config, cfg = True, load(args.config)
        args.model = list(cfg['model'].keys())
        args.data = list(cfg['data'].keys())
    else:
        assert len(args.data), '--data should be a list of data files'

    if RANK == 0:
        if not args.reuse:
            logger.warning('--reuse is not set, will not reuse previous (before one day) temporary files')
        else:
            logger.warning('--reuse is set, will reuse the latest prediction & temporary pickle files')

    # MMEVAL_ROOT (when set) overrides the work dir supplied on the CLI.
    if 'MMEVAL_ROOT' in os.environ:
        args.work_dir = os.environ['MMEVAL_ROOT']

    if not use_config:
        # Propagate CLI-level retry/verbose into every partial-constructed VLM.
        for k, v in supported_VLM.items():
            if hasattr(v, 'keywords') and 'retry' in v.keywords and args.retry is not None:
                v.keywords['retry'] = args.retry
                supported_VLM[k] = v
            if hasattr(v, 'keywords') and 'verbose' in v.keywords and args.verbose is not None:
                v.keywords['verbose'] = args.verbose
                supported_VLM[k] = v

    # If FWD_API is set, will use class `GPT4V` for all API models in the config
    if os.environ.get('FWD_API', None) == '1':
        from vlmeval.config import api_models as supported_APIs
        from vlmeval.api import GPT4V
        for m in args.model:
            if m in supported_APIs:
                kws = supported_VLM[m].keywords
                supported_VLM[m] = partial(GPT4V, **kws)
                logger.warning(f'FWD_API is set, will use class `GPT4V` for {m}')

    if WORLD_SIZE > 1:
        import torch.distributed as dist
        dist.init_process_group(
            backend='nccl',
            timeout=datetime.timedelta(seconds=int(os.environ.get('DIST_TIMEOUT', 3600)))
        )

    for _, model_name in enumerate(args.model):
        model = None
        # Each evaluation run gets a unique id from the date + git commit hash.
        date, commit_id = timestr('day'), githash(digits=8)
        eval_id = f"T{date}_G{commit_id}"

        pred_root = osp.join(args.work_dir, model_name, eval_id)
        pred_root_meta = osp.join(args.work_dir, model_name)
        os.makedirs(pred_root_meta, exist_ok=True)

        prev_pred_roots = ls(osp.join(args.work_dir, model_name), mode='dir')
        if len(prev_pred_roots) and args.reuse:
            prev_pred_roots.sort()

        if not osp.exists(pred_root):
            os.makedirs(pred_root, exist_ok=True)

        if use_config:
            model = build_model_from_config(cfg['model'], model_name, args.use_vllm)

        for _, dataset_name in enumerate(args.data):
            if WORLD_SIZE > 1:
                dist.barrier()

            try:
                pred_format = get_pred_file_format()
                result_file_base = f'{model_name}_{dataset_name}.{pred_format}'

                if use_config:
                    # Rank 0 builds the dataset first so any download /
                    # preparation happens once; the barrier lets the other
                    # ranks rebuild from the warm cache afterwards.
                    if WORLD_SIZE > 1:
                        if RANK == 0:
                            dataset = build_dataset_from_config(cfg['data'], dataset_name)
                        dist.barrier()
                    dataset = build_dataset_from_config(cfg['data'], dataset_name)
                    if dataset is None:
                        logger.error(f'Dataset {dataset_name} is not valid, will be skipped. ')
                        continue
                else:
                    dataset_kwargs = {}
                    # These document-QA datasets need the model name for
                    # model-specific preprocessing.
                    if dataset_name in ['MMLongBench_DOC', 'DUDE', 'DUDE_MINI', 'SLIDEVQA', 'SLIDEVQA_MINI']:
                        dataset_kwargs['model'] = model_name

                    # If distributed, first build the dataset on the main process for doing preparation works
                    if WORLD_SIZE > 1:
                        if RANK == 0:
                            dataset = build_dataset(dataset_name, **dataset_kwargs)
                        dist.barrier()

                    dataset = build_dataset(dataset_name, **dataset_kwargs)
                    if dataset is None:
                        logger.error(f'Dataset {dataset_name} is not valid, will be skipped. ')
                        continue

                result_file = osp.join(pred_root, result_file_base)
                # Reuse the previous prediction file if exists
                if RANK == 0 and len(prev_pred_roots):
                    prepare_reuse_files(
                        pred_root_meta=pred_root_meta, eval_id=eval_id, model_name=model_name,
                        dataset_name=dataset_name, reuse=args.reuse, reuse_aux=args.reuse_aux
                    )

                if WORLD_SIZE > 1:
                    dist.barrier()

                if model is None:
                    model = model_name  # which is only a name

                # FIX: was `args.model != "eval"` — that compares the *list*
                # of model names against a string and is always True, so
                # inference could never be skipped. The intent (mirroring the
                # `args.mode == 'infer'` skip below) is evaluation-only mode.
                if args.mode != 'eval':
                    # Perform the Inference
                    if dataset.MODALITY == 'VIDEO':
                        model = infer_data_job_video(
                            model,
                            work_dir=pred_root,
                            model_name=model_name,
                            dataset=dataset,
                            result_file_name=result_file_base,
                            verbose=args.verbose,
                            api_nproc=args.api_nproc,
                            use_vllm=args.use_vllm)
                    elif dataset.TYPE == 'MT':
                        # Multi-turn datasets use a dedicated inference loop.
                        model = infer_data_job_mt(
                            model,
                            work_dir=pred_root,
                            model_name=model_name,
                            dataset=dataset,
                            verbose=args.verbose,
                            api_nproc=args.api_nproc,
                            ignore_failed=args.ignore,
                            use_vllm=args.use_vllm)
                    else:
                        model = infer_data_job(
                            model,
                            work_dir=pred_root,
                            model_name=model_name,
                            dataset=dataset,
                            verbose=args.verbose,
                            api_nproc=args.api_nproc,
                            ignore_failed=args.ignore,
                            use_vllm=args.use_vllm)

                # Set the judge kwargs first before evaluation or dumping
                judge_kwargs = {
                    'nproc': args.api_nproc,
                    'verbose': args.verbose,
                    'retry': args.retry if args.retry is not None else 3,
                    **(json.loads(args.judge_args) if args.judge_args else {}),
                }

                # Re-assert the CLI retry so it takes precedence over a
                # `retry` entry smuggled in via --judge-args.
                if args.retry is not None:
                    judge_kwargs['retry'] = args.retry
                if args.judge is not None:
                    judge_kwargs['model'] = args.judge
                else:
                    # No explicit judge: pick a per-dataset default.
                    logger.info(dataset_name)
                    if dataset.TYPE in ['MCQ', 'Y/N', 'MCQ_MMMU_Pro'] or listinstr(
                        ['moviechat1k', 'mme-reasoning'], dataset_name.lower()
                    ):
                        if listinstr(['WeMath', 'MME-Reasoning'], dataset_name):
                            judge_kwargs['model'] = 'gpt-4o-mini'
                        elif listinstr(['VisuLogic'], dataset_name):
                            judge_kwargs['model'] = 'exact_matching'
                        else:
                            judge_kwargs['model'] = 'chatgpt-0125'
                    elif listinstr(['MMVet', 'LLaVABench', 'MMBench_Video'], dataset_name):
                        if listinstr(['LLaVABench_KO'], dataset_name):
                            judge_kwargs['model'] = 'gpt-4o-0806'
                        else:
                            judge_kwargs['model'] = 'gpt-4-turbo'
                    elif listinstr(['VGRPBench'], dataset_name):
                        judge_kwargs['model'] = 'gpt-4o'
                    elif listinstr(['MathVista', 'MathVerse', 'MathVision', 'DynaMath', 'VL-RewardBench', 'LogicVista', 'MOAT', 'OCR_Reasoning'], dataset_name):  # noqa: E501
                        judge_kwargs['model'] = 'gpt-4o-mini'
                    elif listinstr(['OlympiadBench'], dataset_name):
                        use_api_judger = judge_kwargs.get("olympiad_use_api_judger", False)
                        if use_api_judger:
                            judge_kwargs['model'] = 'gpt-4o-mini'
                    elif listinstr(['MMLongBench', 'MMDU', 'DUDE', 'SLIDEVQA', 'MIA-Bench', 'WildVision', 'MMAlignBench', 'MM-IFEval'], dataset_name):  # noqa: E501
                        judge_kwargs['model'] = 'gpt-4o'
                    elif listinstr(['ChartMimic'], dataset_name):
                        judge_kwargs['model'] = 'gpt-4o'
                    elif listinstr(['VDC'], dataset_name):
                        judge_kwargs['model'] = 'llama31-8b'
                    elif listinstr(['Video_MMLU_QA', 'Video_MMLU_CAP'], dataset_name):
                        judge_kwargs['model'] = 'qwen-72b'
                    elif listinstr(['MMVMBench'], dataset_name):
                        judge_kwargs['model'] = 'gpt-4o'
                    elif listinstr(['CVQA_EN', 'CVQA_LOC'], dataset_name):
                        judge_kwargs['model'] = 'gpt-4.1'
                    elif listinstr(['M4Bench'], dataset_name):
                        judge_kwargs['model'] = 'gpt-4o'
                    elif listinstr(['AyaVisionBench'], dataset_name):
                        judge_kwargs['model'] = 'gpt-4.1'

                if args.use_verifier:
                    judge_kwargs['use_verifier'] = True
                if args.use_vllm:
                    judge_kwargs['use_vllm'] = True

                if RANK == 0:
                    logger.info(judge_kwargs)

                if WORLD_SIZE > 1:
                    dist.barrier()

                # Only RANK 0 handles the evaluation part
                if RANK == 0:
                    # Prepare Submission Files for MMMU_TEST AND MMT-Bench_ALL
                    if dataset_name in ['MMMU_TEST']:
                        result_json = MMMU_result_transfer(result_file)
                        logger.info(f'Transfer MMMU_TEST result to json for official evaluation, '
                                    f'json file saved in {result_json}')
                        continue
                    elif 'MMT-Bench_ALL' in dataset_name:
                        submission_file = MMTBench_result_transfer(result_file, **judge_kwargs)
                        logger.info(f'Extract options from prediction of MMT-Bench FULL split for official evaluation '
                                    f'(https://eval.ai/web/challenges/challenge-page/2328/overview), '
                                    f'submission file saved in {submission_file}')
                        continue

                    # Skip the evaluation part if only infer
                    if args.mode == 'infer':
                        continue

                    # Skip the evaluation part if the dataset evaluation is not supported or annotations are missing
                    if 'MLLMGuard_DS' in dataset_name:
                        logger.info('The evaluation of MLLMGuard_DS is not supported yet. ')
                        continue
                    elif 'AesBench_TEST' == dataset_name:
                        logger.info(f'The results are saved in {result_file}. '
                                    f'Please send it to the AesBench Team via huangyipo@hotmail.com.')
                        continue
                    elif dataset_name in ['DocVQA_TEST', 'InfoVQA_TEST', 'Q-Bench1_TEST', 'A-Bench_TEST']:
                        logger.info(f'{dataset_name} is a test split without ground-truth. '
                                    'Thus only the inference part is supported for those datasets. ')
                        continue
                    elif dataset_name in [
                        'MMBench_TEST_CN', 'MMBench_TEST_EN', 'MMBench', 'MMBench_CN',
                        'MMBench_TEST_CN_V11', 'MMBench_TEST_EN_V11', 'MMBench_V11', 'MMBench_CN_V11'
                    ] and not MMBenchOfficialServer(dataset_name):
                        logger.error(
                            f'Can not evaluate {dataset_name} on non-official servers, will skip the evaluation.')
                        continue

                    # Setup the proxy for the evaluation
                    eval_proxy = os.environ.get('EVAL_PROXY', None)
                    old_proxy = os.environ.get('HTTP_PROXY', '')
                    if eval_proxy is not None:
                        proxy_set(eval_proxy)

                    # Perform the Evaluation
                    eval_results = dataset.evaluate(result_file, **judge_kwargs)
                    # Display Evaluation Results in Terminal
                    if eval_results is not None:
                        assert isinstance(eval_results, dict) or isinstance(eval_results, pd.DataFrame)
                        logger.info(f'The evaluation of model {model_name} x dataset {dataset_name} has finished! ')
                        logger.info('Evaluation Results:')
                        if isinstance(eval_results, dict):
                            logger.info('\n' + json.dumps(eval_results, indent=4))
                        elif isinstance(eval_results, pd.DataFrame):
                            # Transpose wide-and-short frames for readability.
                            if len(eval_results) < len(eval_results.columns):
                                eval_results = eval_results.T
                            logger.info('\n' + tabulate(eval_results))

                    # Restore the proxy
                    if eval_proxy is not None:
                        proxy_set(old_proxy)

                    # Create the symbolic links for the prediction files
                    files = os.listdir(pred_root)
                    files = [x for x in files if (f'{model_name}_{dataset_name}' in x or "status.json" in x)]
                    cwd = os.getcwd()  # invariant: hoisted out of the loop
                    for f in files:
                        file_addr = osp.join(cwd, pred_root, f)
                        link_addr = osp.join(cwd, pred_root_meta, f)
                        # islink() also catches dangling symlinks that
                        # exists() would miss.
                        if osp.exists(link_addr) or osp.islink(link_addr):
                            os.remove(link_addr)
                        os.symlink(file_addr, link_addr)

            except Exception as e:
                logger.exception(f'Model {model_name} x Dataset {dataset_name} combination failed: {e}, '
                                 'skipping this combination.')
                continue

    if WORLD_SIZE > 1:
        dist.destroy_process_group()
|
| 496 |
+
|
| 497 |
+
|
| 498 |
+
if __name__ == '__main__':
    # Load environment configuration (e.g. API keys / endpoints from a .env
    # file) before dispatching to the CLI entry point.
    load_env()
    main()
|
VLMEvalKit-sudoku/vlmeval/dataset/CGAVCounting/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (161 Bytes). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/dataset/CGAVCounting/__pycache__/cg_av_counting.cpython-310.pyc
ADDED
|
Binary file (12.6 kB). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/dataset/CGAVCounting/cg_av_counting.py
ADDED
|
@@ -0,0 +1,405 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from huggingface_hub import snapshot_download
|
| 2 |
+
from ...smp import *
|
| 3 |
+
from ..video_base import VideoBaseDataset
|
| 4 |
+
from ..utils import build_judge, DEBUG_MESSAGE, cgbench
|
| 5 |
+
from .utils import *
|
| 6 |
+
from ...utils import track_progress_rich
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class CGAVCounting(VideoBaseDataset):
    """CG-AV-Counting: a video counting benchmark with three task modes.

    ``long_acc``  -- answer the count over the full video,
    ``ref_acc``   -- answer the count over a trimmed reference clip,
    ``clue_acc``  -- localize the supporting evidence (event intervals or
                     object bounding boxes, depending on ``category``).
    """

    dataset = "CG-AV-Counting"

    TYPE = "Video-Counting"

    # Expected md5 of the generated TSV index; used to validate the cache.
    MD5 = "d1cd8486353ab85178098d443264a7d0"

    SYS = ""

    def __init__(
        self,
        dataset="CG-AV-Counting",
        use_frame_time=False,
        nframe=0,
        fps=-1,
    ):
        super().__init__(dataset=dataset, nframe=nframe, fps=fps)
        self.use_frame_time = use_frame_time
        self.dataset_name = dataset
        # Filename template for clue-frame extraction (keyed by timestamp).
        self.frame_tmpl_clue = 'frame-{}.jpg'

    @classmethod
    def supported_datasets(cls):
        return ["CGAVCounting"]

    def frame_paths_clue(self, video, timestamp_list):
        """Return one cached-frame path per timestamp for the given video uid."""
        frame_root = osp.join(self.frame_root, video)
        os.makedirs(frame_root, exist_ok=True)
        return [osp.join(frame_root, self.frame_tmpl_clue.format(t)) for t in timestamp_list]

    def save_video_frames_clue(self, video, uid, timestamp_list):
        """Extract and cache the frames at ``timestamp_list`` (in seconds).

        Returns ``(frame_paths, width, height)``. A file lock guards against
        concurrent extraction of the same video by multiple workers.
        """
        if not isinstance(uid, str):
            uid = str(uid)
        import decord
        frame_paths = self.frame_paths_clue(uid, timestamp_list)
        if all(osp.exists(p) for p in frame_paths):
            # Cache hit: reuse previously extracted frames.
            frame = Image.open(frame_paths[0])
            return frame_paths, frame.width, frame.height
        vid_path = osp.join(self.data_root, video)
        vid = decord.VideoReader(vid_path)
        frames = []
        # Average fps maps a timestamp (seconds) to a frame index.
        fps = vid.get_avg_fps()
        lock_path = osp.splitext(vid_path)[0] + '.lock'
        with portalocker.Lock(lock_path, 'w', timeout=30):
            for timestamp_sec in timestamp_list:
                frame_idx = int(timestamp_sec * fps)
                frame = vid[frame_idx]
                frames.append(Image.fromarray(frame.asnumpy()))
            for im, pth in zip(frames, frame_paths):
                if not osp.exists(pth):
                    im.save(pth)
        return frame_paths, frames[0].width, frames[0].height

    def format_time(self, t):
        """Format a timestamp in seconds with exactly two decimal places."""
        return f"{t:.2f}"

    def get_output_filename(self, item):
        """Build the filename of the trimmed reference clip for one item."""
        video_id = Path(item["video"]).stem
        start_str = self.format_time(item["query_interval"][0])
        end_str = self.format_time(item["query_interval"][1])
        return f"{video_id}_{start_str}_{end_str}.mp4"

    def prepare_dataset(self, dataset_name="CG-AV-Counting", repo_id="CG-Bench/CG-AV-Counting"):
        """Download (if needed) and index the dataset.

        Returns ``dict(data_file=<tsv path>, root=<dataset root>)``.
        """

        def check_integrity(pth):
            # Validate the TSV index (md5) and the presence of every video.
            data_file = osp.join(pth, f"{dataset_name}.tsv")
            if not os.path.exists(data_file):
                return False
            if md5(data_file) != self.MD5:
                return False
            data = load(data_file)
            for video_pth in data["video"]:
                if not osp.exists(osp.join(pth, video_pth)):
                    return False
            return True

        cache_path = get_cache_path(repo_id)

        if cache_path is not None and check_integrity(cache_path):
            dataset_path = cache_path
        else:

            def generate_tsv(pth):
                # Flatten the JSON annotations into one TSV row per
                # (sample, task_mode) so each mode is evaluated separately.
                tsv_file = osp.join(pth, f"{dataset_name}.tsv")

                task_modes = ["long_acc", "ref_acc", "clue_acc"]
                all_data = []
                for task_mode in task_modes:
                    # Re-read per mode: the frame is mutated below, so each
                    # iteration must start from a fresh copy.
                    with open(osp.join(pth, "cg-av-counting.json"), "r") as f:
                        data_file = pd.DataFrame(json.load(f))

                    data_file = data_file.assign(index=range(len(data_file)))
                    # FIX: Series.replace only substitutes *whole-value*
                    # matches, so the original `.replace(".mp4", "")` never
                    # stripped the extension. Use str.replace instead.
                    data_file["video_uid"] = data_file["video"].str.replace(".mp4", "", regex=False)
                    data_file["video"] = data_file["video"].apply(lambda x: f"cg_videos_720p/{x}")

                    data_file["ref_video_path"] = ""
                    data_file["ref_video_uid"] = ""

                    if task_mode in ["ref_acc"]:
                        data_file["ref_video_path"] = data_file.apply(
                            lambda row: f"ref_videos/{self.get_output_filename(row)}", axis=1
                        )
                        data_file["ref_video_uid"] = data_file["ref_video_path"].apply(
                            lambda x: x.split("/")[-1].replace(".mp4", ""))

                    data_file["task_mode"] = task_mode

                    if task_mode == "clue_acc":
                        # For clue localization the ground truth is the clue
                        # annotation itself, serialized as JSON.
                        data_file["answer"] = data_file["clue"].apply(json.dumps)

                    data_file = data_file[
                        [
                            "index",
                            "video_uid",
                            "video",
                            "ref_video_path",
                            "ref_video_uid",
                            "question",
                            "answer",
                            "type",
                            "category",
                            "task_mode"
                        ]
                    ]

                    all_data.append(data_file)

                final_data = pd.concat(all_data, ignore_index=True)
                final_data["index"] = range(len(final_data))
                final_data.to_csv(tsv_file, sep="\t", index=False)

            if modelscope_flag_set():
                from modelscope import dataset_snapshot_download

                dataset_path = dataset_snapshot_download(dataset_id=repo_id)
            else:
                dataset_path = snapshot_download(repo_id=repo_id, repo_type="dataset")

            unzip_hf_zip(dataset_path)

            generate_tsv(dataset_path)

        tsv_file = osp.join(dataset_path, f"{dataset_name}.tsv")

        return dict(data_file=tsv_file, root=dataset_path)

    def build_prompt(self, line, video_llm):
        """Build the multimodal message for one sample.

        ``line`` may be a row index or a row; ``video_llm`` selects whether
        the raw video or sampled frames are attached.
        """
        if isinstance(line, int):
            assert line < len(self)
            line = self.data.iloc[line]
        task_mode = line["task_mode"]
        assert task_mode in ["long_acc", "clue_acc", "ref_acc"]
        if task_mode == "long_acc":
            user_prompt = ""
            message = []
            video_path = line["video"]
            if video_llm:
                message.append(dict(type="video", value=osp.join(self.data_root, video_path)))
            else:
                image_paths, frame_indices, vid_fps = self.save_video_frames(
                    video_path, uid=line["video_uid"], num_frames=self.nframe, fps=self.fps
                )
                message.extend(dict(type="image", value=im) for im in image_paths)

                if self.use_frame_time:
                    user_prompt += get_timestampes(frame_indices, vid_fps)

            user_prompt += (
                f"Please answer the question '{line['question']}' with a number. Just output the number itself, "
                "don't output anything else."
            )
            message.append(dict(type="text", value=user_prompt))
        elif task_mode == "ref_acc":
            # Same prompt as long_acc, but over the trimmed reference clip.
            user_prompt = ""
            message = []
            video_path = line["ref_video_path"]
            if video_llm:
                message.append(dict(type="video", value=osp.join(self.data_root, video_path)))
            else:
                image_paths, frame_indices, vid_fps = self.save_video_frames(
                    video_path, uid=line["ref_video_uid"], num_frames=self.nframe, fps=self.fps
                )
                message.extend(dict(type="image", value=im) for im in image_paths)

                if self.use_frame_time:
                    user_prompt += get_timestampes(frame_indices, vid_fps)
            user_prompt += (
                f"Please answer the question '{line['question']}' with a number. Just output the number itself, "
                "don't output anything else."
            )
            message.append(dict(type="text", value=user_prompt))
        elif task_mode == "clue_acc":
            if line["category"] == "event":
                # Event clues: ask for start/end timestamps of each event.
                user_prompt = ""
                message = []
                video_path = line["video"]
                if video_llm:
                    message.append(dict(type="video", value=osp.join(self.data_root, video_path)))
                else:
                    image_paths, frame_indices, vid_fps = self.save_video_frames(
                        video_path, uid=line["video_uid"], num_frames=self.nframe, fps=self.fps
                    )
                    message.extend(dict(type="image", value=im) for im in image_paths)
                    user_prompt += get_timestampes(frame_indices, vid_fps)

                user_prompt += (
                    f"Watch the video and provide your answer to the question '{line['question']}', "
                    "including the start and end timestamps for each event."
                    "Format your answer in JSON, enclosed in <answer> and </answer> tags. "
                    "The output should look like this: <answer>[[\"start_time\", \"end_time\"], ...]</answer>. "
                    "Ensure each timestamp is in seconds (e.g., 'xx.xx')."
                )
                message.append(dict(type="text", value=user_prompt))
            elif line["category"] == "object":
                # Object clues: feed only the annotated clue frames and ask
                # for the first-appearance bounding box per frame.
                user_prompt = ""
                message = []
                video_path = line["video"]
                clue_timestamp_list = []
                for clue in json.loads(line["answer"]):
                    if clue["timestamp"] not in clue_timestamp_list:
                        clue_timestamp_list.append(clue["timestamp"])
                image_paths, width, height = self.save_video_frames_clue(
                    video_path, uid=line["video_uid"], timestamp_list=clue_timestamp_list
                )
                message.append(
                    dict(type="text", value=f"There are {len(image_paths)} frames in the size of {width}x{height}"))
                for idx, im in enumerate(image_paths):
                    message.append(dict(type="text", value=f"Frame{idx + 1}:"))
                    message.append(dict(type="image", value=im))
                user_prompt += (
                    f"Answer the question '{line['question']}', "
                    "including the bounding box for the query object in the first frame "
                    "where it appears. For subsequent frames where the object appears, "
                    "do not provide the bounding box again. "
                    "Format your answer in JSON, enclosed within <answer> and </answer> tags. "
                    "The output should look like this: "
                    "<answer>{\"Frame1\": [[x_min, y_min, x_max, y_max]], \"Frame2\": [...],...}</answer>. "
                    "In the output, each frame should either contain the bounding box of the object "
                    "(if it appears for the first time in that frame) or an empty list `[]` "
                    "(if the object does not appear or it has already been labeled in a previous frame). "
                    "Ensure that bounding boxes are listed as [x_min, y_min, x_max, y_max]."
                )
                message.append(dict(type="text", value=user_prompt))
            elif line["category"] == "attribute":
                # Attribute clues: cluster objects and box each cluster's
                # first appearance. Clue annotation is a list of lists here.
                user_prompt = ""
                message = []
                video_path = line["video"]
                clue_timestamp_list = []
                for clue_ in json.loads(line["answer"]):
                    for clue in clue_:
                        if clue["timestamp"] not in clue_timestamp_list:
                            clue_timestamp_list.append(clue["timestamp"])
                image_paths, width, height = self.save_video_frames_clue(
                    video_path, uid=line["video_uid"], timestamp_list=clue_timestamp_list
                )
                message.append(dict(
                    type="text",
                    value=f"There are {len(image_paths)} frames in the size of {width}x{height}"))
                for idx, im in enumerate(image_paths):
                    message.append(dict(type="text", value=f"Frame{idx + 1}:"))
                    message.append(dict(type="image", value=im))
                user_prompt += (
                    f"Answer the question '{line['question']}', clustering the objects according to the question. "
                    "For each unique cluster, assign a unique label and return the bounding box for each object in "
                    "the first frame where it appears. For subsequent frames where the object appears, "
                    "do not output anything. "
                    "Format your answer in JSON, enclosed within <answer> and </answer> tags. "
                    "The output should look like this: "
                    "<answer>{\"Frame 1\": [{\"bbox\": [x_min, y_min, x_max, y_max], 'label': \"Label 1\"}], "
                    "\"Frame 2\": [...], ...}</answer>. "
                    "In the output, each frame should either contain the bounding box and label for the object "
                    "(if it appears for the first time in that frame) or an empty list `[]` "
                    "(if the object has already been labeled or does not appear in that frame). "
                    "The label should correspond to a unique object cluster according to the question."
                )
                message.append(dict(type="text", value=user_prompt))
        # FIX: removed stray debug `print(message)` that dumped the whole
        # multimodal payload to stdout for every sample.
        return message

    def save_video_frames(self, video, uid, num_frames=8, fps=-1):
        """Sample, cache and validate frames from ``video``.

        Sampling is either uniform (``num_frames > 0``) or rate-based
        (``fps > 0``). Returns ``(valid_paths, valid_indices, vid_fps)``;
        frames that fail PIL verification are silently dropped.
        """
        if not isinstance(uid, str):
            uid = str(uid)
        import decord
        vid_path = osp.join(self.data_root, video)
        vid = decord.VideoReader(vid_path)
        vid_fps = vid.get_avg_fps()
        n_frames = len(vid)

        if num_frames > 0 and fps < 0:
            # Uniform sampling: num_frames interior frames.
            step_size = len(vid) / (num_frames + 1)
            indices = [int(i * step_size) for i in range(1, num_frames + 1)]
            frame_paths = self.frame_paths(uid)
        elif fps > 0:
            # Rate-based sampling at the requested fps.
            total_duration = n_frames / vid_fps
            required_frames = int(total_duration * fps)
            step_size = vid_fps / fps
            indices = [int(i * step_size) for i in range(required_frames)]
            frame_paths = self.frame_paths_fps(uid, len(indices))
        else:
            # FIX: previously fell through with `frame_paths`/`indices`
            # unbound, raising an opaque UnboundLocalError.
            raise ValueError('Either num_frames (> 0) or fps (> 0) must be specified')

        # Save and validate frames
        valid_paths = []
        valid_indices = []
        lock_path = osp.splitext(vid_path)[0] + '.lock'
        with portalocker.Lock(lock_path, 'w', timeout=30):
            if not np.all([osp.exists(p) for p in frame_paths]):
                images = [vid[i].asnumpy() for i in indices]
                for i, (img_array, path) in enumerate(zip(images, frame_paths)):
                    if osp.exists(path):
                        # Already on disk: keep it only if it decodes.
                        try:
                            with Image.open(path) as img:
                                img.verify()
                            valid_paths.append(path)
                            valid_indices.append(indices[i])
                        except Exception:
                            continue
                    else:
                        try:
                            img = Image.fromarray(img_array)
                            img.save(path)
                            img.verify()
                            valid_paths.append(path)
                            valid_indices.append(indices[i])
                        except Exception:
                            continue
            else:
                # Full cache hit: just re-verify every frame.
                for i, path in enumerate(frame_paths):
                    try:
                        with Image.open(path) as img:
                            img.verify()
                        valid_paths.append(path)
                        valid_indices.append(indices[i])
                    except Exception:
                        continue

        return valid_paths, valid_indices, vid_fps

    def evaluate(self, eval_file, **judge_kwargs):
        """Score predictions in ``eval_file`` and return the rating dict.

        Rows without a prediction or whose answer cannot be parsed get a
        score of -1; they count in the ALL rating but not in VALID.
        """
        assert get_file_extension(eval_file) in ['xlsx', 'json', 'tsv'], \
            'data file should be an supported format (xlsx/json/tsv) file'

        tgt_file = get_intermediate_file_path(eval_file, '_rating', 'json')
        score_file = get_intermediate_file_path(eval_file, '_score', 'csv')

        data = load(eval_file)

        # FIX: copy the slices before mutating them — plain boolean-mask
        # slices trigger pandas chained-assignment warnings and may not
        # write through.
        data_un = data[~pd.isna(data["prediction"])].copy()
        data_pred_na = data[pd.isna(data["prediction"])].copy()

        data_pred_na["score"] = -1

        scores_df = data_un.apply(
            lambda row: post_process(
                response=row["prediction"],
                right_answer=row["answer"],
                task_mode=row["task_mode"],
                category=row["category"]
            ),
            axis=1,
            result_type='expand'
        )

        data_un = pd.concat([data_un, scores_df], axis=1)

        data = pd.concat([data_pred_na, data_un])

        rejected_count = (data["score"] == -1).sum()

        print(
            f"Among {len(data)} questions, "
            f"failed to obtain prediction for {len(data_pred_na)} questions, "
            f"failed to obtain the score for {rejected_count - len(data_pred_na)} questions. "
            f"Those questions will be counted as -1 score in ALL rating, and will not be counted in VALID rating."
        )

        dump(data, score_file)

        rating = rating_func(score_file)

        dump(rating, tgt_file)

        return rating
|
VLMEvalKit-sudoku/vlmeval/dataset/CGAVCounting/utils.py
ADDED
|
@@ -0,0 +1,422 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import math
|
| 3 |
+
|
| 4 |
+
from ...smp import *
|
| 5 |
+
import numpy as np
|
| 6 |
+
import re
|
| 7 |
+
import zipfile
|
| 8 |
+
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
from tqdm import tqdm
|
| 11 |
+
import signal
|
| 12 |
+
|
| 13 |
+
|
def rating_func(data_path):
    """Aggregate per-question metric columns into per-task-mode averages.

    Reads the score table written during evaluation and returns a dict of
    "<task_mode>/<metric>" -> rounded mean. The stored "rmse" column holds
    squared errors, so its aggregate is sqrt(mean(x^2)).
    """
    frame = load(data_path)

    metric_map = {
        "long_acc": ["acc", "oboa", "mae", "rmse"],
        "ref_acc": ["acc", "oboa", "mae", "rmse"],
        "clue_acc": ["wcs", "ifa"],
    }

    summary = {}
    for mode, metric_names in metric_map.items():
        subset = frame[frame["task_mode"] == mode]
        for metric in metric_names:
            column = subset[metric]
            if metric == "rmse":
                # Column holds squared errors; take sqrt of their mean.
                summary[f"{mode}/rmse"] = round(np.sqrt(column.mean()), 4)
            else:
                summary[f"{mode}/{metric}"] = round(column.mean(), 4)

    return summary
| 37 |
+
|
| 38 |
+
|
def get_timestampes(frame_indices, fps):
    """Render sampled frame indices as a human-readable timestamp preamble.

    Each frame index is converted to seconds (index / fps, rounded to four
    decimals) and the values are joined into the prompt header string.
    """
    seconds = [str(round(idx / fps, 4)) for idx in frame_indices]
    timestamps = ", ".join(seconds)
    return "A total of {frame_num} frames are sampled. Their corresponding timestamps are:\n\n{timestamps}\n\n".format(
        frame_num=len(frame_indices), timestamps=timestamps
    )
| 45 |
+
|
| 46 |
+
|
def time_str_to_seconds(time_str: str) -> float:
    """Convert a clock-style string ("MM:SS" or "HH:MM:SS", optionally with
    a fractional suffix such as "01:02:03.5") into seconds.

    Raises:
        ValueError: when the colon-separated part has neither 2 nor 3 fields.
    """
    text = time_str.strip()

    # Split off an optional fractional-seconds suffix.
    whole, dot, frac = text.partition('.')
    fraction = float(f"0.{frac}") if dot else 0.0

    fields = [int(p) for p in whole.split(":")]
    if len(fields) == 2:
        base = fields[0] * 60 + fields[1]
    elif len(fields) == 3:
        base = fields[0] * 3600 + fields[1] * 60 + fields[2]
    else:
        raise ValueError(f"Invalid time format: {text}")

    return base + fraction
| 68 |
+
|
| 69 |
+
|
def extract_outer_json(text):
    """Find the first parseable top-level JSON object/array embedded in *text*.

    Scans with a bracket stack; every time a candidate closes at depth 0 it
    is tried with json.loads. Returns the re-serialized JSON string of the
    first candidate that parses, or None when nothing parses.
    """
    pairs = {'}': '{', ']': '['}
    depth_stack = []
    begin = None

    for pos, ch in enumerate(text):
        if ch in ('{', '['):
            if not depth_stack:
                begin = pos  # start of an outermost candidate
            depth_stack.append(ch)
        elif ch in ('}', ']'):
            # Pop only on a matching opener; stray closers are ignored.
            if depth_stack and depth_stack[-1] == pairs[ch]:
                depth_stack.pop()
                if not depth_stack and begin is not None:
                    snippet = text[begin:pos + 1]
                    try:
                        return json.dumps(json.loads(snippet))
                    except json.JSONDecodeError:
                        continue  # try the next balanced block
    return None
| 91 |
+
|
| 92 |
+
|
def compute_tiou(t1, t2):
    """Temporal IoU of two [start, end] intervals (0.0 for an empty union)."""
    overlap = max(0.0, min(t1[1], t2[1]) - max(t1[0], t2[0]))
    span = max(t1[1], t2[1]) - min(t1[0], t2[0])
    if span > 0:
        return overlap / span
    return 0.0
| 100 |
+
|
| 101 |
+
|
def compute_sIoU(box1, box2):
    """
    IoU between two axis-aligned bounding boxes.

    Args:
        box1, box2: [x1, y1, x2, y2]; the corner order may be swapped — the
            coordinates are normalized to [min_x, min_y, max_x, max_y] first.

    Returns:
        Intersection-over-union, 0.0 when the union is empty.
    """
    # Normalize corner order.
    a = np.array([min(box1[0], box1[2]), min(box1[1], box1[3]),
                  max(box1[0], box1[2]), max(box1[1], box1[3])])
    b = np.array([min(box2[0], box2[2]), min(box2[1], box2[3]),
                  max(box2[0], box2[2]), max(box2[1], box2[3])])

    # Overlap rectangle, clamped at zero width/height.
    overlap_w = max(0, min(a[2], b[2]) - max(a[0], b[0]))
    overlap_h = max(0, min(a[3], b[3]) - max(a[1], b[1]))
    inter_area = overlap_w * overlap_h

    # Union = sum of areas minus the double-counted overlap.
    union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter_area
    return inter_area / union if union > 0 else 0.0
| 136 |
+
|
| 137 |
+
|
def greedy_matching(gt_instances, pred_instances, iou_func):
    """Greedily pair ground-truth and predicted instances by descending IoU.

    Repeatedly picks the highest-IoU (gt, pred) pair among the still-unmatched
    indices until one side is exhausted. Returns a list of (gt_idx, pred_idx)
    index pairs.
    """
    free_gt = set(range(len(gt_instances)))
    free_pred = set(range(len(pred_instances)))
    pairs = []

    while free_gt and free_pred:
        best_iou = -1
        best_pair = None
        # Exhaustive search over the remaining cross product.
        for g in free_gt:
            for p in free_pred:
                score = iou_func(gt_instances[g], pred_instances[p])
                if score > best_iou:
                    best_iou = score
                    best_pair = (g, p)

        if best_pair:
            g, p = best_pair
            pairs.append((g, p))
            free_gt.remove(g)
            free_pred.remove(p)

    return pairs
| 161 |
+
|
| 162 |
+
|
def compute_cluster_pair_wcs(gt, pred, iou_type):
    """Weighted clue score (WCS) between one GT cluster and one predicted cluster.

    Combines a localization term (best-match IoU averaged over GT instances)
    with a count penalty (relative count mismatch, clamped at 0) via a
    geometric mean: sqrt(loc_acc * count_penalty).

    Args:
        gt, pred: for 'tIoU' — lists of [start, end] intervals;
            for 'sIoU' — lists of (frame_index, bbox) tuples.
        iou_type: 'tIoU' (temporal) or 'sIoU' (spatial, scored per frame).

    Raises:
        ValueError: for any other iou_type.
    """
    if iou_type == 'tIoU':
        loc_sum = 0.0
        for g in gt:
            # Best temporal IoU over all predictions ([0.0] guards empty pred).
            loc_sum += max([compute_tiou(g, p) for p in pred] or [0.0])
        loc_acc = loc_sum / len(gt) if gt else 0.0
        # Penalize count mismatch relative to the GT count.
        count_penalty = 1.0 - abs(len(pred) - len(gt)) / max(len(gt), 1)
        return math.sqrt(loc_acc * max(0, count_penalty))

    elif iou_type == 'sIoU':
        # group by frame index and score each frame independently
        from collections import defaultdict
        gt_by_f = defaultdict(list)
        pred_by_f = defaultdict(list)
        for f, box in gt:
            gt_by_f[f].append(box)
        for f, box in pred:
            pred_by_f[f].append(box)

        all_f = set(gt_by_f) | set(pred_by_f)
        wcs = 0.0
        for f in all_f:
            gt_f = gt_by_f.get(f, [])
            pred_f = pred_by_f.get(f, [])
            # One-to-one greedy pairing by spatial IoU within the frame.
            matches = greedy_matching(gt_f, pred_f, compute_sIoU)
            loc_sum = sum([compute_sIoU(gt_f[i], pred_f[j]) for i, j in matches])
            loc_acc = loc_sum / len(gt_f) if gt_f else 0.0
            count_penalty = 1.0 - abs(len(pred_f) - len(gt_f)) / max(len(gt_f), 1)
            wcs += math.sqrt(loc_acc * max(0, count_penalty))
        # Average the per-frame scores over all frames seen on either side.
        return wcs / max(len(all_f), 1)

    else:
        raise ValueError("Unsupported iou_type")
| 198 |
+
|
| 199 |
+
|
class TimeoutException(Exception):
    """Raised by timeout_handler when SIGALRM fires before completion."""
    pass
| 202 |
+
|
| 203 |
+
|
def timeout_handler(signum, frame):
    # SIGALRM handler: abort the interrupted computation via an exception.
    raise TimeoutException("Function execution exceeded the time limit.")
| 206 |
+
|
| 207 |
+
|
def compute_wcs_unlabeled(gt_clusters, pred_clusters, iou_type='tIoU',
                          timeout=10):  # mainly for "attribute"; "object"/"event" also work treated as a single cluster
    """Optimal-assignment WCS between unlabeled GT and predicted clusters.

    Builds a pairwise cluster-score matrix and solves the assignment with the
    Hungarian algorithm; the result is averaged over all GT clusters
    (unmatched GT clusters contribute 0).

    Returns the score, or None when the computation exceeds *timeout* seconds.
    NOTE(review): relies on SIGALRM, which is Unix-only and must run in the
    main thread — confirm for other deployment targets.
    NOTE(review): empty gt_clusters makes the final division raise
    ZeroDivisionError (callers swallow it via a broad except).
    """
    from scipy.optimize import linear_sum_assignment
    # Set the timeout signal handler
    signal.signal(signal.SIGALRM, timeout_handler)
    signal.alarm(timeout)  # Set the alarm to go off in 'timeout' seconds

    try:
        K = len(gt_clusters)
        M = len(pred_clusters)

        # Build cost matrix (we want max score -> min cost)
        score_matrix = np.zeros((K, M))
        for i in range(K):
            for j in range(M):
                score_matrix[i, j] = compute_cluster_pair_wcs(gt_clusters[i], pred_clusters[j], iou_type)

        cost_matrix = -score_matrix  # maximize score -> minimize cost

        row_ind, col_ind = linear_sum_assignment(cost_matrix)

        matched_scores = [score_matrix[i, j] for i, j in zip(row_ind, col_ind)]

        # WCS = average over gt clusters (including unmatched = 0)
        total_wcs = sum(matched_scores)
        return total_wcs / K

    except TimeoutException:
        print(gt_clusters, pred_clusters)
        print("Function execution exceeded the time limit.")
        return None  # signal timeout to the caller

    finally:
        signal.alarm(0)  # Cancel the alarm after the function completes or times out
| 243 |
+
|
| 244 |
+
|
def post_process(response, right_answer, task_mode, category):
    """Score one model response against the ground truth.

    Args:
        response: raw model output string (may be empty).
        right_answer: ground truth — a number-like string for the counting
            modes, a JSON string of clue annotations for "clue_acc".
        task_mode: "long_acc" / "ref_acc" (counting) or "clue_acc" (grounding).
            NOTE(review): any other value leaves `result` unbound and raises
            UnboundLocalError — confirm callers only pass these three.
        category: clue category for "clue_acc": "event", "object" or "attribute".

    Returns:
        dict of metric -> value: {"acc", "oboa", "mae", "rmse"} for counting,
        {"wcs", "ifa"} for "clue_acc". "rmse" holds the SQUARED error; the
        square root is taken at aggregation time.
    """
    from word2number import w2n
    if task_mode in ["long_acc", "ref_acc"]:
        result = {"acc": 0, "oboa": 0, "mae": 0, "rmse": 0}
        if response:
            try:
                # Parse a spelled-out or numeric count from the response.
                pred = w2n.word_to_num(response)
            except:
                # Unparseable responses are scored as a count of 0.
                pred = 0
            # Exact-match accuracy (tolerance for float noise).
            if abs(float(right_answer) - float(pred)) <= 1e-5:
                result["acc"] = 1

            # Off-by-one accuracy.
            if abs(float(right_answer) - float(pred)) <= 1:
                result["oboa"] = 1

            # Clip the error contribution at max(2*gt, 100) so a single wild
            # prediction cannot dominate MAE/RMSE.
            if abs(float(right_answer) - float(pred)) <= max(2 * float(right_answer),100):
                result["mae"] = abs(float(right_answer) - float(pred))
                result["rmse"] = abs(float(right_answer) - float(pred)) ** 2
            else:
                result["mae"] = abs(float(right_answer) * 2)
                result["rmse"] = abs(float(right_answer) * 2) ** 2
    elif task_mode == "clue_acc":
        result = {"wcs": 0, "ifa": 0}
        if response:
            clues = json.loads(right_answer)
            # Prefer the content inside <answer>...</answer> when present.
            content_match = re.search(r"<answer>(.*?)</answer>", response, re.DOTALL)
            student_answer = content_match.group(1).strip() if content_match else response.strip()
            j = None
            try:
                try:
                    j = json.loads(student_answer)
                except:
                    # Fall back to the first balanced JSON block in the text.
                    j = json.loads(extract_outer_json(student_answer))
            except:
                pass
            if j is not None:
                try:
                    if category == "event":
                        # Expected: list of [start, end] pairs, either as
                        # colon-separated time strings or plain numbers.
                        pred = []
                        for e in j:

                            if isinstance(e[0],str) and isinstance(e[1],str) and ":" in e[0] and ":" in e[1]:
                                pred.append([time_str_to_seconds(e[0]), time_str_to_seconds(e[1])])
                            else:
                                pred.append([float(e[0].split(" ")[0]) if isinstance(e[0],str) else e[0],
                                             float(e[1].split(" ")[0]) if isinstance(e[1],str) else e[1]])
                        gt = []
                        for e in clues:
                            gt.append([float(e['start']), float(e['end'])])

                        result["wcs"] = compute_wcs_unlabeled([gt], [pred], "tIoU")
                        result["ifa"] = 1  # instruction-following: answer was parseable
                    elif category == "object":
                        # GT: (frame index, bbox) tuples; frames indexed by
                        # first-appearance order of their timestamps.
                        gt = []
                        clue_timestamp_list = []
                        for clue in clues:
                            if clue["timestamp"] not in clue_timestamp_list:
                                clue_timestamp_list.append(clue["timestamp"])
                        for clue in clues:
                            gt.append((clue_timestamp_list.index(clue["timestamp"]), clue['bbox']))
                        pred = []
                        # Prediction: {"FrameN": [...]}; boxes either as flat
                        # [x1, y1, x2, y2] or as corner pairs [[x1, y1], [x2, y2]].
                        for key in j.keys():
                            if "Frame" not in key:
                                continue
                            idx = int(key.replace("Frame", "")) - 1  # frame keys are 1-based
                            if len(j[key]) == 0:
                                continue
                            if isinstance(j[key][0],list) and len(j[key][0]) == 4:
                                for e in j[key]:
                                    if isinstance(e,list) and len(e) == 4:
                                        pred.append((idx, e))
                            elif isinstance(j[key][0],list) and len(j[key][0]) == 2:
                                for ii in range(int(len(j[key]) // 2)):
                                    if isinstance(j[key][ii * 2],list) and len(j[key][ii * 2]) == 2 and isinstance(
                                            j[key][ii * 2 + 1],list) and len(j[key][ii * 2 + 1]) == 2:
                                        pred.append((idx, [j[key][ii * 2][0], j[key][ii * 2][1], j[key][ii * 2 + 1][0],
                                                           j[key][ii * 2 + 1][1]]))
                        result["wcs"] = compute_wcs_unlabeled([gt], [pred], "sIoU")
                        result["ifa"] = 1
                    elif category == "attribute":
                        # GT: one cluster of (frame, bbox) tuples per attribute.
                        gt = []
                        clue_timestamp_list = []
                        for clue_ in clues:
                            for clue in clue_:
                                if clue["timestamp"] not in clue_timestamp_list:
                                    clue_timestamp_list.append(clue["timestamp"])
                        for clue_ in clues:
                            gt_ = []
                            for clue in clue_:
                                gt_.append((clue_timestamp_list.index(clue["timestamp"]), clue['bbox']))
                            gt.append(gt_)
                        # Prediction: per-frame labelled boxes, grouped by label
                        # into clusters ('bbox' and 'bbox_2d' both accepted).
                        pred = {}
                        for key in j.keys():
                            if "Frame" not in key:
                                continue
                            idx = int(key.replace("Frame", "")) - 1
                            for e in j[key]:
                                if e['label'] not in pred.keys():
                                    pred[e['label']] = []
                                if 'bbox' in e:
                                    if isinstance(e['bbox'],list) and len(e['bbox']) == 4:
                                        pred[e['label']].append((idx, e['bbox']))
                                if 'bbox_2d' in e:
                                    if isinstance(e['bbox_2d'],list) and len(e['bbox_2d']) == 4:
                                        pred[e['label']].append((idx, e['bbox_2d']))
                        pred_list = [pred[key] for key in pred]
                        result["wcs"] = compute_wcs_unlabeled(gt, pred_list, "sIoU")
                        result["ifa"] = 1
                except:
                    # Malformed structure — leave scores at 0.
                    pass

    return result
| 357 |
+
|
| 358 |
+
|
def get_chunk_number(filename):
    """Extract the numeric chunk index from a name like 'videos_chunk_003.zip'.

    Returns float('inf') for names that do not follow the pattern, so that
    malformed entries sort after every real chunk.
    """
    try:
        num = filename.split("chunk_")[1].split(".zip")[0]
        return int(num)
    # Narrowed from a bare `except:`: only the expected parse failures
    # (missing 'chunk_' segment, non-numeric index) map to the sentinel.
    except (IndexError, ValueError):
        return float('inf')
| 365 |
+
|
| 366 |
+
|
def auto_merge_and_unzip_parts(target_dir, extract_dir, zip_prefix=None):
    """Reassemble split zip archives ('<name>.zip.partNNN') and extract them.

    Args:
        target_dir: directory containing the '*.zip.part*' volume files.
        extract_dir: destination for the extracted contents (created if needed).
        zip_prefix: when given, only archives whose stem starts with this
            prefix are processed.

    The merged '.zip' is deleted after extraction; the part files are kept.
    """
    import shutil

    target_dir = Path(target_dir)
    extract_dir = Path(extract_dir)
    extract_dir.mkdir(parents=True, exist_ok=True)

    # Collect split volumes, e.g. video_chunk_001.zip.part000
    part_files = sorted(target_dir.glob("*.zip.part*"))
    groups = {}

    # Group the parts by the zip archive they belong to.
    for part_file in part_files:
        match = re.match(r"(.*\.zip)\.part\d+$", part_file.name)
        if match:
            zip_name = match.group(1)
            if zip_prefix is None or Path(zip_name).stem.startswith(zip_prefix):
                groups.setdefault(zip_name, []).append(part_file)

    if not groups:
        print(f"No matching zip parts found with prefix: {zip_prefix}")
        return

    # Merge each group of volumes, then extract the resulting archive.
    for zip_name, parts in tqdm(groups.items(), desc="Merging and unzipping"):
        parts = sorted(parts, key=lambda p: int(p.name.split("part")[-1]))
        zip_path = target_dir / zip_name

        # Concatenate the volumes in order; stream in chunks instead of
        # loading each part fully into memory (parts can be multi-GB).
        with open(zip_path, 'wb') as outfile:
            for part in parts:
                with open(part, 'rb') as infile:
                    shutil.copyfileobj(infile, outfile)

        # Extract the merged zip.
        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
            zip_ref.extractall(extract_dir)

        # Remove the merged zip to save disk space.
        zip_path.unlink()
| 405 |
+
|
| 406 |
+
|
def unzip_hf_zip(target_dir):
    """Prepare the CG-AV-Counting video folders under *target_dir*.

    Reassembles and extracts the split archives into 'cg_videos_720p' and
    'ref_videos'; skips all work when both folders already exist.
    """
    target_dir = Path(target_dir)

    videos_dir = target_dir / "cg_videos_720p"
    ref_videos_dir = target_dir / "ref_videos"

    # Idempotent: both targets present means extraction already ran.
    if videos_dir.exists() and ref_videos_dir.exists():
        print("all target dirs exist, skip.")
        return

    videos_dir.mkdir(parents=True, exist_ok=True)

    auto_merge_and_unzip_parts(target_dir, ref_videos_dir, zip_prefix="ref_videos")
    auto_merge_and_unzip_parts(target_dir, videos_dir, zip_prefix="videos")

    # Fixed typo in the user-facing message ("sucessfully" -> "successfully").
    print("successfully unzip all files.")
VLMEvalKit-sudoku/vlmeval/dataset/EgoExoBench/README.md
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# EgoExoBench
|
| 2 |
+
|
| 3 |
+
This is the official repository of [EgoExoBench: A
|
| 4 |
+
Benchmark for First- and Third-person View Video
|
| 5 |
+
Understanding in MLLMs]()
|
| 6 |
+
|
| 7 |
+
## 📊 Benchmark Overview
|
| 8 |
+
|
| 9 |
+
**EgoExoBench** is a large-scale benchmark designed to evaluate cross-view video understanding in multimodal large language models (MLLMs). It contains paired egocentric–exocentric videos and over **7,300 multiple-choice questions** across **11 subtasks**, covering three key dimensions of ego–exo reasoning:
|
| 10 |
+
|
| 11 |
+
* **Ego-Exo Relation**
|
| 12 |
+
* **Ego-Exo View Transition**
|
| 13 |
+
* **Ego-Exo Temporal Reasoning**
|
| 14 |
+
|
| 15 |
+
## 📝 Data Preparation
|
| 16 |
+
|
| 17 |
+
### Video Data
|
| 18 |
+
|
| 19 |
+
EgoExoBench builds upon six publicly available ego–exo datasets.
|
| 20 |
+
|
| 21 |
+
* [Ego-Exo4D](https://ego-exo4d-data.org/)
|
| 22 |
+
* [LEMMA](https://sites.google.com/view/lemma-activity)
|
| 23 |
+
* [EgoExoLearn](https://huggingface.co/datasets/hyf015/EgoExoLearn)
|
| 24 |
+
* [TF2023](https://github.com/ziweizhao1993/PEN)
|
| 25 |
+
* [EgoMe](https://huggingface.co/datasets/HeqianQiu/EgoMe)
|
| 26 |
+
* [CVMHAT](https://github.com/RuizeHan/CVMHT)
|
| 27 |
+
|
| 28 |
+
The script will automatically download the processed video data, **except Ego-Exo4D**, due to license restrictions. You need to manually download it from the [official website](https://ego-exo4d-data.org/) and organize it as shown below.
|
| 29 |
+
|
| 30 |
+
If you prefer to download all datasets manually, you can simply create empty `processed_videos/` and `processed_frames/` folders and organize the datasets in the following structure:
|
| 31 |
+
|
| 32 |
+
```
|
| 33 |
+
[LMUData]/videos/EgoExoBench
|
| 34 |
+
├── CVMHAT/
|
| 35 |
+
│ └── data/
|
| 36 |
+
├── EgoExo4D/
|
| 37 |
+
│ └── takes/
|
| 38 |
+
├── EgoExoLearn/
|
| 39 |
+
├── EgoMe/
|
| 40 |
+
├── LEMMA/
|
| 41 |
+
├── TF2023/
|
| 42 |
+
│ └── data/
|
| 43 |
+
├── processed_frames/
|
| 44 |
+
└── processed_videos/
|
| 45 |
+
```
|
| 46 |
+
### Multiple-Choice Questions (MCQs)
|
| 47 |
+
|
| 48 |
+
The script will automatically download the EgoExoBench **multiple-choice questions (MCQs)** file from this [link](https://huggingface.co/datasets/Heleun/EgoExoBench_MCQ).
|
| 49 |
+
|
| 50 |
+
## 🚀 Model Evaluation
|
| 51 |
+
|
| 52 |
+
Use the following commands to evaluate your VLMs on EgoExoBench:
|
| 53 |
+
|
| 54 |
+
```shell
|
| 55 |
+
# For lightweight vision-language models
|
| 56 |
+
torchrun --nproc-per-node=1 run.py \
|
| 57 |
+
--data EgoExoBench_MCQ \
|
| 58 |
+
--model Qwen2.5-VL-7B-Instruct-ForVideo
|
| 59 |
+
|
| 60 |
+
# For larger models with higher memory usage
|
| 61 |
+
python run.py \
|
| 62 |
+
--data EgoExoBench_MCQ \
|
| 63 |
+
--model Qwen2.5-VL-72B-Instruct-ForVideo
|
| 64 |
+
```
|
| 65 |
+
|
| 66 |
+
To skip evaluation on the **Ego-Exo4D** portion of the benchmark, specify the `EgoExoBench_64frame_skip_EgoExo4D` configuration with the **`--data`** argument.
|
| 67 |
+
|
| 68 |
+
```
|
| 69 |
+
# Example command to skip Ego-Exo4D
|
| 70 |
+
torchrun --nproc-per-node=1 run.py \
|
| 71 |
+
--data EgoExoBench_64frame_skip_EgoExo4D \
|
| 72 |
+
--model [Your_Model_Name]
|
| 73 |
+
```
|
| 74 |
+
|
| 75 |
+
> 💡 Note: If you encounter errors related to stacking videos with varying frame counts, try using `transformers==4.49.0` as a temporary workaround.
|
| 76 |
+
|
| 77 |
+
## 🙏 Acknowledgements
|
| 78 |
+
|
| 79 |
+
EgoExoBench builds upon publicly available ego–exo datasets: [Ego-Exo4D](https://ego-exo4d-data.org/), [LEMMA](https://sites.google.com/view/lemma-activity), [EgoExoLearn](https://huggingface.co/datasets/hyf015/EgoExoLearn), [TF2023](https://github.com/ziweizhao1993/PEN), [EgoMe](https://huggingface.co/datasets/HeqianQiu/EgoMe), [CVMHAT](https://github.com/RuizeHan/CVMHT). Thanks for open-sourcing!
|
VLMEvalKit-sudoku/vlmeval/dataset/EgoExoBench/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (20.6 kB). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/dataset/EgoExoBench/cvmhat_preprocess.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
import xml.etree.ElementTree as ET
|
| 4 |
+
import cv2
|
| 5 |
+
|
| 6 |
+
# replace with your actual path
|
| 7 |
+
ann_file = 'EgoExoBench/MCQ/Ego-Exo-Relation/person_relation.json'
|
| 8 |
+
|
| 9 |
+
|
def add_bbox(bbox_img_path):
    """Draw the ground-truth rectangle of one person onto a frame and save it.

    The output filename encodes "<frame_idx>_<person_id>"; the source frame
    and its XML annotation are derived from that path by the CVMHAT layout
    conventions (bbox -> frame_sel, data -> GT_xml).
    """
    out_dir = os.path.dirname(bbox_img_path)
    os.makedirs(out_dir, exist_ok=True)

    src_dir = os.path.dirname(bbox_img_path).replace('bbox', 'frame_sel')
    frame_idx, person_id = os.path.basename(bbox_img_path).split('.')[0].split('_')
    src_img_path = os.path.join(src_dir, frame_idx + '.jpg')
    xml_file = src_img_path.replace('data', 'GT_xml').replace('frame_sel/', '').replace('.jpg', '.xml')

    annotation_root = ET.parse(xml_file).getroot()
    frame = cv2.imread(src_img_path)

    for obj in annotation_root.findall('object'):
        # Only the requested person's rectangle is drawn.
        if obj.find('name').text != person_id:
            continue
        canvas = frame.copy()
        rect = obj.find('rectangle')
        x_min = int(rect.find('xmin').text)
        y_min = int(rect.find('ymin').text)
        x_max = int(rect.find('xmax').text)
        y_max = int(rect.find('ymax').text)
        cv2.rectangle(canvas, (x_min, y_min), (x_max, y_max), (255, 0, 0), 3)
        cv2.imwrite(bbox_img_path, canvas)
    return
| 34 |
+
|
| 35 |
+
|
# Script entry: draw a ground-truth bbox overlay for every image referenced
# by the MCQ annotation file (the query images plus each option's images).
with open(ann_file, 'r') as f:
    ann_data = json.load(f)
for aitem in ann_data.values():
    image_paths = []
    image_paths.extend(aitem['query']['image_paths'])
    for oitem in aitem['options']:
        image_paths.extend(oitem['image_paths'])

    for image_path in image_paths:
        add_bbox(image_path)
|
VLMEvalKit-sudoku/vlmeval/dataset/EgoExoBench/tf2023_preprocess.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
import cv2
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
# replace the path with your actual path
|
| 7 |
+
ann_file = 'EgoExoBench/MCQ/Ego-Exo-View-Transition/ego_wearer_identification.json'
|
| 8 |
+
|
| 9 |
+
|
def add_bbox(bbox_img_path):
    """Draw red boxes around one person's segmentation components and save a
    540x360 copy of the frame at *bbox_img_path*.

    The path encodes the video id, frame index and person id; the polygon
    annotations come from the TF2023 'Segmentation/T/<frame>.json' file.
    """
    bbox_dir = os.path.dirname(bbox_img_path)
    os.makedirs(bbox_dir, exist_ok=True)
    vid, frame_idx, person_id = bbox_img_path.split('/')[-4],bbox_img_path.split('/')[-2], bbox_img_path.split('/')[-1].split('.')[0]  # noqa: E501
    import os.path as osp
    json_file = os.path.join(osp.dirname(osp.dirname(osp.dirname(osp.dirname(bbox_img_path)))), vid, 'Segmentation/T', frame_idx + '.json')  # noqa: E501
    ori_img_path = json_file.replace('.json', '.jpg')

    with open(json_file, mode='r', encoding="utf-8") as f:
        configs = json.load(f)
        shapes = configs["shapes"]

    # Binary mask for the selected person's polygons.
    mask = np.zeros((configs["imageHeight"], configs["imageWidth"], 1), np.uint8)

    # The source frame may live at several alternative locations; probe them.
    if not os.path.exists(ori_img_path):
        ori_img_path = ori_img_path.replace('T/', '')

    if not os.path.exists(ori_img_path):
        ori_img_path = ori_img_path.replace('Segmentation/', 'frame/T/')

    original_image = cv2.imread(ori_img_path)

    for shape in shapes:
        if shape['label'] != person_id:
            continue

        cv2.fillPoly(mask, [np.array(shape["points"], np.int32)], 1)

    # Connected components of the mask give one bbox per disjoint region;
    # the last row of `stats` (largest area after sorting) is the background
    # component, dropped via [:-1].
    retval, labels, stats, centroids = cv2.connectedComponentsWithStats(mask, connectivity=8)
    stats = stats[stats[:,4].argsort()]
    bboxs = stats[:-1]

    for b in bboxs:
        x0, y0 = b[0], b[1]
        x1 = b[0] + b[2]
        y1 = b[1] + b[3]

        start_point, end_point = (x0, y0), (x1, y1)
        color = (0, 0, 255)
        thickness = 2
        # cv2.rectangle draws in place, so rectangles accumulate on
        # original_image across iterations; the last written file carries
        # all boxes. NOTE(review): with no components nothing is written —
        # confirm that is acceptable upstream.
        mask_bboxs = cv2.rectangle(original_image, start_point, end_point, color, thickness)
        mask_bboxs = cv2.resize(mask_bboxs, (540, 360))
        cv2.imwrite(bbox_img_path, mask_bboxs)
    return
| 55 |
+
|
| 56 |
+
|
def rescale_img(img_path, width, height):
    """Resize the image at *img_path* to (width, height) in place."""
    img = cv2.imread(img_path)
    resized_img = cv2.resize(img, (width, height))
    cv2.imwrite(img_path, resized_img)
| 61 |
+
|
| 62 |
+
|
# Script entry: draw a bbox overlay for each option's first image and rescale
# every query image to 960x540.
with open(ann_file, 'r') as f:
    ann_data = json.load(f)
for aitem in ann_data.values():
    image_paths = []  # NOTE(review): unused — appears to be leftover scaffolding
    for oitem in aitem['options']:
        add_bbox(oitem['image_paths'][0])

    for img_path in aitem['query']['image_paths']:
        rescale_img(img_path, 960, 540)
VLMEvalKit-sudoku/vlmeval/dataset/EgoExoBench/utils.py
ADDED
|
@@ -0,0 +1,771 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ...smp import *
|
| 2 |
+
from ..utils.multiple_choice import extract_answer_from_item
|
| 3 |
+
from PIL import Image, ImageOps
|
| 4 |
+
import torchvision
|
| 5 |
+
import random
|
| 6 |
+
import numbers
|
| 7 |
+
import math
|
| 8 |
+
import torch
|
| 9 |
+
import json
|
| 10 |
+
import pandas as pd
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
import numpy as np
|
| 14 |
+
import re
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def get_dimension_rating(data_path, category_type='subtask_type'):
    """Tally per-category accuracy from a scored results file.

    Args:
        data_path: path accepted by the project-level ``load`` helper;
            must yield a DataFrame with a ``score`` column and the
            ``category_type`` column.
        category_type: column used to bucket the items.

    Returns:
        dict mapping each category to ``[n_correct, n_total, 'xx.xx%']``,
        plus an ``'overall'`` entry aggregated across categories.
    """
    records = load(data_path)

    board = {}
    for _, row in records.iterrows():
        bucket = board.setdefault(row[category_type], [0, 0])
        bucket[1] += 1
        # Any truthy score counts as a hit (scores are 0/1 flags here).
        if row['score']:
            bucket[0] += 1

    hit_sum = 0
    seen_sum = 0
    for name, bucket in board.items():
        hit_sum += bucket[0]
        seen_sum += bucket[1]
        board[name].append(f'{bucket[0] / bucket[1] * 100:.2f}%')

    board['overall'] = [hit_sum, seen_sum, f'{hit_sum / seen_sum * 100:.2f}%']

    return board
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def extract_characters_regex(s):
    """Extract a single multiple-choice letter (A-D) from a model response.

    Strips common answer prefixes, then returns the first capital A/B/C/D
    found, or '' when none can be located (or the response is long prose
    with no option letter at all).

    FIX: the original list was missing two commas, so adjacent string
    literals fused ('The best option is' + 'The correct option is' and
    'Best answer:' + 'Best option:') and those four prefixes were never
    stripped individually. Commas restored.
    """
    s = s.strip()
    answer_prefixes = [
        'The best answer is',
        'The correct answer is',
        'The answer is',
        'The answer',
        'The best option is',
        'The correct option is',
        'Best answer:',
        'Best option:',
        'Answer:',
        'Option:',
    ]
    for answer_prefix in answer_prefixes:
        s = s.replace(answer_prefix, '')

    # Long free-form text with no option letter: treat as unanswered.
    if len(s.split()) > 10 and not re.search('[ABCD]', s):
        return ''
    matches = re.search(r'[ABCD]', s)
    if matches is None:
        return ''
    return matches[0]
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def extract_option(model, input_item, dataset_name):
    """Populate the per-letter option fields from the question text, then
    delegate the final choice extraction to ``extract_answer_from_item``.

    The question's first line is the stem; each following line is expected
    to start with 'A.', 'B.', ... — its text is stored under the matching
    single-letter key of ``input_item``.
    """
    option_lines = input_item['question'].split('\n')[1:]
    for idx, line in enumerate(option_lines):
        letter = chr(ord('A') + idx)
        marker = letter + '.'
        pos = line.find(marker)
        if pos >= 0:
            input_item[letter] = line[pos + len(marker):].strip('. \n')
    return extract_answer_from_item(model, input_item, dataset_name)['opt']
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def process_results(score_file,model_name):
    """Compute per-tamper-type binary metrics for one model's score file.

    Each tamper type is evaluated jointly with the untouched 'original'
    rows (so both Yes/No classes are present), then a macro-averaged
    'overall' row is appended.

    Args:
        score_file: Excel file with at least 'answer' ('Yes'/'No'),
            'score' (1 = correct) and 'tamper_type' columns.
        model_name: recorded in the output 'Model' column.

    Returns:
        pd.DataFrame with one row per tamper type (plus 'overall') and
        columns ['Model', 'Task', 'Accuracy', 'Precision', 'Recall',
        'F1 Score', 'Confusion Matrix'].
    """
    # sklearn is imported lazily so the module can load without it.
    from sklearn.metrics import (
        accuracy_score,
        precision_score,
        recall_score,
        f1_score,
        classification_report,
        confusion_matrix,
        roc_auc_score
    )
    data = pd.read_excel(score_file)

    # Create the prediction column based on the Score and Answer columns:
    # a correct item (score == 1) predicted the ground-truth label, a
    # wrong one predicted the opposite label.
    data['prediction'] = data.apply(
        lambda row: row['answer'] if row['score'] == 1 else ('Yes' if row['answer'] == 'No' else 'No'), axis=1
    )

    # Recompute metrics for tamper types including 'original' in the calculations but exclude 'original' from the output
    grouped_metrics_with_original_excluding_original = {}

    original_group = data[data['tamper_type'] == 'original']

    for tamper_type, group in data[data['tamper_type'] != 'original'].groupby('tamper_type'):
        # Combine the current group with the 'original' group
        combined_group = pd.concat([group, original_group])

        # Extract ground truth and predictions for the combined group
        # (Yes -> 1, No -> 0).
        y_true_group = combined_group['answer'].map({'Yes': 1, 'No': 0})
        y_pred_group = combined_group['prediction'].map({'Yes': 1, 'No': 0})

        # Calculate metrics for the combined group
        accuracy = accuracy_score(y_true_group, y_pred_group)
        precision = precision_score(y_true_group, y_pred_group, zero_division=0)
        recall = recall_score(y_true_group, y_pred_group, zero_division=0)
        f1 = f1_score(y_true_group, y_pred_group, zero_division=0)
        conf_matrix = confusion_matrix(y_true_group, y_pred_group)

        # Store metrics for the tamper_type
        grouped_metrics_with_original_excluding_original[tamper_type] = {
            "Accuracy": accuracy,
            "Precision": precision,
            "Recall": recall,
            "F1 Score": f1,
            "Confusion Matrix": conf_matrix.tolist()  # Convert to list for JSON compatibility
        }

    # Add the Macro Average row to the Dictionary
    # grouped_metrics_with_original_excluding_original["overall"] = macro_averages

    # Display the metrics in a dataframe for clarity
    df_grouped_metrics_with_original_excluding_original = pd.DataFrame.from_dict(
        grouped_metrics_with_original_excluding_original, orient='index'
    )

    # Compute Macro Averages for Accuracy, Precision, Recall, and F1 Score
    macro_averages = {
        "Accuracy": df_grouped_metrics_with_original_excluding_original["Accuracy"].mean(),
        "Precision": df_grouped_metrics_with_original_excluding_original["Precision"].mean(),
        "Recall": df_grouped_metrics_with_original_excluding_original["Recall"].mean(),
        "F1 Score": df_grouped_metrics_with_original_excluding_original["F1 Score"].mean(),
        "Confusion Matrix": "N/A"  # Macro average doesn't have a meaningful confusion matrix
    }

    # # Add the Macro Average row to the DataFrame
    df_grouped_metrics_with_original_excluding_original.loc["overall"] = macro_averages

    # df_grouped_metrics_with_original_excluding_original
    # Round-trip through JSON: after .T, columns are tamper types, so the
    # resulting dict maps tamper_type -> {metric: value}.
    metrics_dict = json.loads(df_grouped_metrics_with_original_excluding_original.T.to_json())
    # Process Model Level Metrics
    formatted_data = []
    for task, task_metrics in metrics_dict.items():
        task_metrics['Model'] = model_name
        task_metrics['Task'] = task
        formatted_data.append(task_metrics)

    df_metrics = pd.DataFrame(formatted_data)

    # Reorder columns to make 'Model' and 'Task' appear first
    columns_order = ['Model', 'Task'] + [col for col in df_metrics.columns if col not in ['Model', 'Task']]
    df_metrics = df_metrics[columns_order]

    return df_metrics
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
def aggregate_metrics_with_macro_average(score_file):
    """Compute tamper-detection metrics per (task_type, tamper_type).

    For each task_type, every tamper type is evaluated together with that
    task's 'original' rows; a macro-averaged 'overall' row is added per
    task_type.

    Args:
        score_file: Excel file with 'answer' ('Yes'/'No'), 'score'
            (1 = correct), 'task_type' and 'tamper_type' columns.

    Returns:
        pd.DataFrame with columns ['task_type', 'tamper_type', 'Accuracy',
        'Precision', 'Recall', 'F1 Score'].
    """
    # sklearn is imported lazily so the module can load without it.
    from sklearn.metrics import (
        accuracy_score,
        precision_score,
        recall_score,
        f1_score,
        classification_report,
        confusion_matrix,
        roc_auc_score
    )
    # Load data
    data = pd.read_excel(score_file)

    # Create the prediction column based on the Score and Answer columns:
    # correct items predicted the ground truth, wrong items the opposite.
    data['prediction'] = data.apply(
        lambda row: row['answer'] if row['score'] == 1 else ('Yes' if row['answer'] == 'No' else 'No'), axis=1
    )

    # Initialize a dictionary to store metrics
    task_type_metrics = {}

    # Process each task_type separately
    for task_type, task_group in data.groupby('task_type'):
        # Separate the 'original' group for the current task_type
        original_group = task_group[task_group['tamper_type'] == 'original']

        # Skip if there is no 'original' data for this task_type
        if original_group.empty:
            continue

        # Process each tamper type for the current task_type (excluding 'original')
        tamper_metrics = {}
        for tamper_type, tamper_group in task_group[task_group['tamper_type'] != 'original'].groupby('tamper_type'):

            # Combine the tamper group with the original group of the current task_type
            combined_group = pd.concat([tamper_group, original_group])

            # Map answers and predictions to binary values
            y_true = combined_group['answer'].map({'Yes': 1, 'No': 0})
            y_pred = combined_group['prediction'].map({'Yes': 1, 'No': 0})

            # Compute metrics
            accuracy = accuracy_score(y_true, y_pred)
            precision = precision_score(y_true, y_pred, zero_division=0)
            recall = recall_score(y_true, y_pred, zero_division=0)
            f1 = f1_score(y_true, y_pred, zero_division=0)
            conf_matrix = confusion_matrix(y_true, y_pred)

            # Store metrics for the tamper_type
            tamper_metrics[tamper_type] = {
                "Accuracy": accuracy,
                "Precision": precision,
                "Recall": recall,
                "F1 Score": f1,
                "Confusion Matrix": conf_matrix.tolist()  # Convert to list for JSON compatibility
            }

        # Compute Macro Averages for the current task_type
        metrics_df = pd.DataFrame(tamper_metrics).T
        macro_average = {
            "Accuracy": metrics_df["Accuracy"].mean(),
            "Precision": metrics_df["Precision"].mean(),
            "Recall": metrics_df["Recall"].mean(),
            "F1 Score": metrics_df["F1 Score"].mean(),
            "Confusion Matrix": "N/A"  # Macro average doesn't have a meaningful confusion matrix
        }

        # Add the macro average as "overall" for the task_type
        tamper_metrics["overall"] = macro_average

        # Add tamper metrics for the current task_type to the main dictionary
        task_type_metrics[task_type] = tamper_metrics

    # Transform the nested dictionary into a DataFrame
    dataframes = []
    for task_type, metrics in task_type_metrics.items():
        task_df = pd.DataFrame.from_dict(metrics, orient='index')
        task_df['task_type'] = task_type  # Add the task_type as a column
        dataframes.append(task_df)

    # Combine all task-specific DataFrames into a single DataFrame
    result_df = pd.concat(dataframes).reset_index().rename(columns={'index': 'tamper_type'})
    # Reorder the columns to place task_type first, then tamper_type
    result_df = result_df[['task_type', 'tamper_type', 'Accuracy', 'Precision', 'Recall',
                           'F1 Score', 'Confusion Matrix']]

    # Select only numeric columns for aggregation
    numeric_columns = ['Accuracy', 'Precision', 'Recall', 'F1 Score']

    # Group by task_type and tamper_type, and calculate the mean for numeric columns
    average_metrics = result_df.groupby(['task_type', 'tamper_type'])[numeric_columns].mean().reset_index()

    return average_metrics
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
def check_ans(pred, gt):
    """
    Checks if the predicted answer matches the ground truth.

    Args:
        pred (str): The predicted answer.
        gt (str): The ground truth answer.

    Returns:
        bool: True if the predicted answer matches the ground truth, False otherwise.
    """
    flag = False

    # Split prediction into option letter and (unused) content.
    pred_list = pred.lower().strip().split(' ')
    pred_option, _ = pred_list[0], ' '.join(pred_list[1:])

    # Split ground truth into option letter and content.
    gt_list = gt.lower().strip().split(' ')
    gt_option, gt_content = gt_list[0], ' '.join(gt_list[1:])

    # Remove trailing period from ground truth content if present.
    # FIX: guard against a single-token ground truth (e.g. 'A'), where
    # gt_content is '' and gt_content[-1] raised IndexError.
    if gt_content and gt_content[-1] == '.':
        gt_content = gt_content[:-1]

    # Condition 1: predicted option letter is contained in the GT option.
    if pred_option.replace('.', '') in gt_option:
        flag = True
    # Condition 2: GT option is contained in the predicted option.
    elif gt_option in pred_option:
        flag = True
    # Condition 3: the full (case-sensitive) ground truth appears verbatim
    # in the raw prediction.
    elif gt in pred:
        flag = True

    return flag
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
def check_ans_with_model(pred, gt, model, item, dataset_name='MVBench'):
    """
    Checks if the predicted answer matches the ground truth, falling back
    to a judge model when string matching fails.

    Args:
        pred (str): The predicted answer.
        gt (str): The ground truth answer.
        model: Judge model passed to extract_answer_from_item.
        item (dict): The question record; must contain 'answer' for the
            model fallback comparison.
        dataset_name (str, optional): Dataset identifier. Defaults to 'MVBench'.

    Returns:
        bool: True if the predicted answer matches the ground truth, False otherwise.
    """
    flag = False

    # Lowercase and split both strings into option letter + content.
    pred_list = pred.lower().strip().split(' ')
    pred_option, _ = pred_list[0], ' '.join(pred_list[1:])
    gt_list = gt.lower().strip().split(' ')
    gt_option, gt_content = gt_list[0], ' '.join(gt_list[1:])

    # Remove trailing period from ground truth content if present.
    # FIX: guard against a single-token ground truth, where gt_content is
    # '' and gt_content[-1] raised IndexError.
    if gt_content and gt_content[-1] == '.':
        gt_content = gt_content[:-1]

    # Condition 1: predicted option letter is contained in the GT option.
    if pred_option.replace('.', '') in gt_option:
        flag = True
    # Condition 2: GT option is contained in the predicted option.
    elif gt_option in pred_option:
        flag = True
    # Condition 3: ask the judge model to extract the chosen option and
    # compare it against the recorded answer.
    elif extract_answer_from_item(model, item, dataset_name)['opt'] == item['answer']:
        flag = True

    return flag
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
def check_ans_advanced(pred, gt):
    """Check a prediction against the ground truth, additionally mapping a
    purely numeric ground-truth content (0-9) to its English word form so
    e.g. gt '2' matches a prediction containing 'two'.

    Args:
        pred (str): The predicted answer.
        gt (str): The ground truth answer (e.g. 'B. 2').

    Returns:
        bool: True on a match, False otherwise.
    """
    number_table = {
        0: 'zero',
        1: 'one',
        2: 'two',
        3: 'three',
        4: 'four',
        5: 'five',
        6: 'six',
        7: 'seven',
        8: 'eight',
        9: 'nine',
    }
    flag = False

    pred_list = pred.lower().split(' ')
    pred_option, _ = pred_list[0], ' '.join(pred_list[1:])
    gt_list = gt.lower().split(' ')
    gt_option, gt_content = gt_list[0], ' '.join(gt_list[1:])
    # FIX: guard against a single-token ground truth, where gt_content is
    # '' and gt_content[-1] raised IndexError.
    if gt_content and gt_content[-1] == '.':
        gt_content = gt_content[:-1]

    # Map digit 0-9 ground truth to its word form; non-numeric content
    # (ValueError) or numbers >= 10 (KeyError) keep the original text.
    # FIX: narrowed the bare except and removed a leftover debug print.
    try:
        gt_content = number_table[int(gt_content.strip('. \n'))]
    except (ValueError, KeyError):
        pass

    if pred_option.replace('.', '') in gt_option:
        flag = True
    elif gt_option in pred_option:
        flag = True
    # FIX: require non-empty gt_content — '' is a substring of anything
    # and made every single-token ground truth match trivially.
    elif gt_content and gt_content.lower().strip('. \n') in pred.lower().strip('. \n'):
        flag = True

    return flag
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
class GroupRandomCrop(object):
    """Crop every frame of a clip at one shared random location.

    All frames must have identical dimensions; a single (x1, y1) offset is
    drawn so the crop is spatially consistent across the clip.
    """

    def __init__(self, size):
        # Accept either a single number (square crop) or a (h, w) pair.
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size

    def __call__(self, img_group):
        w, h = img_group[0].size
        th, tw = self.size

        cropped = list()

        # One random offset shared by every frame in the clip.
        x1 = random.randint(0, w - tw)
        y1 = random.randint(0, h - th)

        for frame in img_group:
            assert (frame.size[0] == w and frame.size[1] == h)
            if w == tw and h == th:
                # Crop size equals the frame size: nothing to cut.
                cropped.append(frame)
            else:
                cropped.append(frame.crop((x1, y1, x1 + tw, y1 + th)))

        return cropped
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
class MultiGroupRandomCrop(object):
    """Like GroupRandomCrop, but draws ``groups`` independent random
    offsets and returns the clip cropped at each of them, concatenated."""

    def __init__(self, size, groups=1):
        # Accept either a single number (square crop) or a (h, w) pair.
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size
        self.groups = groups

    def __call__(self, img_group):
        w, h = img_group[0].size
        th, tw = self.size

        cropped = list()

        for _ in range(self.groups):
            # Fresh random offset per group; shared within the group.
            x1 = random.randint(0, w - tw)
            y1 = random.randint(0, h - th)

            for frame in img_group:
                assert (frame.size[0] == w and frame.size[1] == h)
                if w == tw and h == th:
                    cropped.append(frame)
                else:
                    cropped.append(frame.crop((x1, y1, x1 + tw, y1 + th)))

        return cropped
|
| 425 |
+
|
| 426 |
+
|
| 427 |
+
class GroupCenterCrop(object):
    """Apply torchvision's CenterCrop to every frame of a clip."""

    def __init__(self, size):
        self.worker = torchvision.transforms.CenterCrop(size)

    def __call__(self, img_group):
        return [self.worker(frame) for frame in img_group]
|
| 433 |
+
|
| 434 |
+
|
| 435 |
+
class GroupRandomHorizontalFlip(object):
    """Randomly horizontally flips the given PIL.Image with a probability of 0.5.

    When ``is_flow`` is set at construction, the horizontal-flow frames
    (assumed to be the even-indexed ones) are value-inverted after the
    mirror so the flow direction stays consistent.
    """

    def __init__(self, is_flow=False):
        self.is_flow = is_flow

    def __call__(self, img_group, is_flow=False):
        # NOTE(review): the is_flow argument of __call__ is ignored; only
        # the value set in __init__ is honored — confirm with callers.
        if random.random() < 0.5:
            flipped = [frame.transpose(Image.FLIP_LEFT_RIGHT) for frame in img_group]
            if self.is_flow:
                for i in range(0, len(flipped), 2):
                    # invert flow pixel values when flipping
                    flipped[i] = ImageOps.invert(flipped[i])
            return flipped
        return img_group
|
| 453 |
+
|
| 454 |
+
|
| 455 |
+
class GroupNormalize(object):
    """In-place per-channel (mean, std) normalization of a stacked clip
    tensor whose first dimension is a multiple of len(mean)."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        # Tile mean/std so each leading slice of the tensor gets a value.
        rep_mean = self.mean * (tensor.size()[0] // len(self.mean))
        rep_std = self.std * (tensor.size()[0] // len(self.std))

        # TODO: make efficient (vectorize instead of a Python loop)
        for channel, m, s in zip(tensor, rep_mean, rep_std):
            channel.sub_(m).div_(s)

        return tensor
|
| 469 |
+
|
| 470 |
+
|
| 471 |
+
class GroupScale(object):
    """ Rescales the input PIL.Image to the given 'size'.
    'size' will be the size of the smaller edge.
    For example, if height > width, then image will be
    rescaled to (size * height / width, size)
    size: size of the smaller edge
    interpolation: Default: PIL.Image.BILINEAR
    """

    def __init__(self, size, interpolation=Image.BILINEAR):
        self.worker = torchvision.transforms.Resize(size, interpolation)

    def __call__(self, img_group):
        return [self.worker(frame) for frame in img_group]
|
| 485 |
+
|
| 486 |
+
|
| 487 |
+
class GroupOverSample(object):
    """10-crop style oversampling: 5 fixed crops per frame (corners +
    center) and, optionally, their horizontal mirrors, for every frame of
    the clip."""

    def __init__(self, crop_size, scale_size=None, flip=True):
        # crop_size: int -> square crop; otherwise a (w, h) pair.
        self.crop_size = crop_size if not isinstance(
            crop_size, int) else (crop_size, crop_size)

        # Optional pre-resize of the whole clip before cropping.
        if scale_size is not None:
            self.scale_worker = GroupScale(scale_size)
        else:
            self.scale_worker = None
        self.flip = flip

    def __call__(self, img_group):

        if self.scale_worker is not None:
            img_group = self.scale_worker(img_group)

        image_w, image_h = img_group[0].size
        crop_w, crop_h = self.crop_size

        # 5 fixed offsets (corners + center) from the sibling transform.
        offsets = GroupMultiScaleCrop.fill_fix_offset(
            False, image_w, image_h, crop_w, crop_h)
        oversample_group = list()
        for o_w, o_h in offsets:
            normal_group = list()
            flip_group = list()
            for i, img in enumerate(img_group):
                crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h))
                normal_group.append(crop)
                flip_crop = crop.copy().transpose(Image.FLIP_LEFT_RIGHT)

                # For grayscale (flow) input, invert even-indexed frames
                # (horizontal flow) so the mirrored flow stays consistent.
                if img.mode == 'L' and i % 2 == 0:
                    flip_group.append(ImageOps.invert(flip_crop))
                else:
                    flip_group.append(flip_crop)

            # Per offset: all normal crops, then (optionally) all flips.
            oversample_group.extend(normal_group)
            if self.flip:
                oversample_group.extend(flip_group)
        return oversample_group
|
| 526 |
+
|
| 527 |
+
|
| 528 |
+
class GroupFullResSample(object):
    """Sample 3 horizontal crops (left, center, right at vertical center)
    per frame, plus their mirrors when ``flip`` is enabled."""

    def __init__(self, crop_size, scale_size=None, flip=True):
        # crop_size: int -> square crop; otherwise a (w, h) pair.
        self.crop_size = crop_size if not isinstance(
            crop_size, int) else (crop_size, crop_size)

        # Optional pre-resize of the whole clip before cropping.
        if scale_size is not None:
            self.scale_worker = GroupScale(scale_size)
        else:
            self.scale_worker = None
        self.flip = flip

    def __call__(self, img_group):

        if self.scale_worker is not None:
            img_group = self.scale_worker(img_group)

        image_w, image_h = img_group[0].size
        crop_w, crop_h = self.crop_size

        # Quarter-steps of the free space define the 3 offsets below.
        w_step = (image_w - crop_w) // 4
        h_step = (image_h - crop_h) // 4

        offsets = list()
        offsets.append((0 * w_step, 2 * h_step))  # left
        offsets.append((4 * w_step, 2 * h_step))  # right
        offsets.append((2 * w_step, 2 * h_step))  # center

        oversample_group = list()
        for o_w, o_h in offsets:
            normal_group = list()
            flip_group = list()
            for i, img in enumerate(img_group):
                crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h))
                normal_group.append(crop)
                if self.flip:
                    flip_crop = crop.copy().transpose(Image.FLIP_LEFT_RIGHT)

                    # Grayscale (flow) even-indexed frames are value-
                    # inverted so mirrored horizontal flow stays correct.
                    if img.mode == 'L' and i % 2 == 0:
                        flip_group.append(ImageOps.invert(flip_crop))
                    else:
                        flip_group.append(flip_crop)

            oversample_group.extend(normal_group)
            oversample_group.extend(flip_group)
        return oversample_group
|
| 573 |
+
|
| 574 |
+
|
| 575 |
+
class GroupMultiScaleCrop(object):
    """Randomly crop the clip at one of several scales (relative to the
    shorter edge) and one of a set of fixed positions, then resize every
    frame to ``input_size``. All frames share the same crop."""

    def __init__(self, input_size, scales=None, max_distort=1,
                 fix_crop=True, more_fix_crop=True):
        # Candidate scale factors for the crop relative to the short edge.
        self.scales = scales if scales is not None else [1, .875, .75, .66]
        # Max allowed index distance between the w-scale and h-scale,
        # bounding the aspect-ratio distortion.
        self.max_distort = max_distort
        # fix_crop: sample from fixed offsets instead of fully random ones.
        self.fix_crop = fix_crop
        # more_fix_crop: extend the 5 base offsets to 13.
        self.more_fix_crop = more_fix_crop
        self.input_size = input_size if not isinstance(input_size, int) else [
            input_size, input_size]
        self.interpolation = Image.BILINEAR

    def __call__(self, img_group):

        im_size = img_group[0].size

        # One crop geometry shared by the whole clip.
        crop_w, crop_h, offset_w, offset_h = self._sample_crop_size(im_size)
        crop_img_group = [
            img.crop(
                (offset_w,
                 offset_h,
                 offset_w + crop_w,
                 offset_h + crop_h)) for img in img_group]
        ret_img_group = [img.resize((self.input_size[0], self.input_size[1]), self.interpolation)
                         for img in crop_img_group]
        return ret_img_group

    def _sample_crop_size(self, im_size):
        """Pick a (crop_w, crop_h) pair and its offset at random."""
        image_w, image_h = im_size[0], im_size[1]

        # find a crop size
        base_size = min(image_w, image_h)
        crop_sizes = [int(base_size * x) for x in self.scales]
        # Snap sizes within 3px of the target input size exactly to it.
        crop_h = [
            self.input_size[1] if abs(
                x - self.input_size[1]) < 3 else x for x in crop_sizes]
        crop_w = [
            self.input_size[0] if abs(
                x - self.input_size[0]) < 3 else x for x in crop_sizes]

        # Only combine w/h scales whose indices differ by <= max_distort.
        pairs = []
        for i, h in enumerate(crop_h):
            for j, w in enumerate(crop_w):
                if abs(i - j) <= self.max_distort:
                    pairs.append((w, h))

        crop_pair = random.choice(pairs)
        if not self.fix_crop:
            w_offset = random.randint(0, image_w - crop_pair[0])
            h_offset = random.randint(0, image_h - crop_pair[1])
        else:
            w_offset, h_offset = self._sample_fix_offset(
                image_w, image_h, crop_pair[0], crop_pair[1])

        return crop_pair[0], crop_pair[1], w_offset, h_offset

    def _sample_fix_offset(self, image_w, image_h, crop_w, crop_h):
        """Choose one of the precomputed fixed crop offsets at random."""
        offsets = self.fill_fix_offset(
            self.more_fix_crop, image_w, image_h, crop_w, crop_h)
        return random.choice(offsets)

    @staticmethod
    def fill_fix_offset(more_fix_crop, image_w, image_h, crop_w, crop_h):
        """Return the list of fixed (x, y) crop offsets: 5 base positions,
        extended to 13 when ``more_fix_crop`` is set."""
        w_step = (image_w - crop_w) // 4
        h_step = (image_h - crop_h) // 4

        ret = list()
        ret.append((0, 0))  # upper left
        ret.append((4 * w_step, 0))  # upper right
        ret.append((0, 4 * h_step))  # lower left
        ret.append((4 * w_step, 4 * h_step))  # lower right
        ret.append((2 * w_step, 2 * h_step))  # center

        if more_fix_crop:
            ret.append((0, 2 * h_step))  # center left
            ret.append((4 * w_step, 2 * h_step))  # center right
            ret.append((2 * w_step, 4 * h_step))  # lower center
            ret.append((2 * w_step, 0 * h_step))  # upper center

            ret.append((1 * w_step, 1 * h_step))  # upper left quarter
            ret.append((3 * w_step, 1 * h_step))  # upper right quarter
            ret.append((1 * w_step, 3 * h_step))  # lower left quarter
            ret.append((3 * w_step, 3 * h_step))  # lower righ quarter

        return ret
|
| 660 |
+
|
| 661 |
+
|
| 662 |
+
class GroupRandomSizedCrop(object):
    """Random crop the given PIL.Image to a random size of (0.08 to 1.0) of the original size
    and and a random aspect ratio of 3/4 to 4/3 of the original aspect ratio
    This is popularly used to train the Inception networks
    size: size of the smaller edge
    interpolation: Default: PIL.Image.BILINEAR
    """

    def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = size
        self.interpolation = interpolation

    def __call__(self, img_group):
        # Try up to 10 random (area, aspect-ratio) proposals; the first
        # that fits inside the frame wins.
        for attempt in range(10):
            area = img_group[0].size[0] * img_group[0].size[1]
            target_area = random.uniform(0.08, 1.0) * area
            aspect_ratio = random.uniform(3. / 4, 4. / 3)

            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))

            # Swap orientation half the time.
            if random.random() < 0.5:
                w, h = h, w

            if w <= img_group[0].size[0] and h <= img_group[0].size[1]:
                x1 = random.randint(0, img_group[0].size[0] - w)
                y1 = random.randint(0, img_group[0].size[1] - h)
                found = True
                break
            else:
                # NOTE(review): reconstructed with the else on the inner
                # if (as in upstream TSN transforms) — behaviorally the
                # same as a for/else here since all 10 failures leave
                # found False; confirm against the original layout.
                found = False
                x1 = 0
                y1 = 0

        if found:
            out_group = list()
            for img in img_group:
                img = img.crop((x1, y1, x1 + w, y1 + h))
                assert (img.size == (w, h))
                out_group.append(
                    img.resize(
                        (self.size, self.size), self.interpolation))
            return out_group
        else:
            # Fallback: deterministic scale + random crop at self.size.
            scale = GroupScale(self.size, interpolation=self.interpolation)
            crop = GroupRandomCrop(self.size)
            return crop(scale(img_group))
|
| 710 |
+
|
| 711 |
+
|
| 712 |
+
class ConvertDataFormat(object):
    """Reshape a stacked clip tensor for 3-D (video) models.

    For '2D' models the (T*C, H, W) stack passes through unchanged; for
    anything else it is reshaped to (C, T, H, W) with C fixed at 3.
    """

    def __init__(self, model_type):
        self.model_type = model_type

    def __call__(self, images):
        if self.model_type == '2D':
            # 2-D models consume the channel-stacked frames as-is.
            return images
        tc, h, w = images.size()
        frames = tc // 3
        reshaped = images.view(frames, 3, h, w)
        # (T, C, H, W) -> (C, T, H, W)
        return reshaped.permute(1, 0, 2, 3)
|
| 724 |
+
|
| 725 |
+
|
| 726 |
+
class Stack(object):
    """Concatenate a list of frames along the channel axis into one
    H x W x (T*C) array. Grayscale ('L') frames get an explicit channel
    dimension first; ``roll`` reverses channel order (RGB -> BGR)."""

    def __init__(self, roll=False):
        self.roll = roll

    def __call__(self, img_group):
        mode = img_group[0].mode
        if mode == 'L':
            expanded = [np.expand_dims(frame, 2) for frame in img_group]
            return np.concatenate(expanded, axis=2)
        elif mode == 'RGB':
            if self.roll:
                # Reverse the channel axis of each frame before stacking.
                reversed_frames = [np.array(frame)[:, :, ::-1] for frame in img_group]
                return np.concatenate(reversed_frames, axis=2)
            else:
                return np.concatenate(img_group, axis=2)
|
| 743 |
+
|
| 744 |
+
|
| 745 |
+
class ToTorchFormatTensor(object):
    """ Converts a PIL.Image (RGB) or numpy.ndarray (H x W x C) in the range [0, 255]
    to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] """

    def __init__(self, div=True):
        # When True, byte values are scaled from [0, 255] into [0.0, 1.0].
        self.div = div

    def __call__(self, pic):
        if isinstance(pic, np.ndarray):
            # numpy input: HWC -> CHW via a single permute.
            tensor = torch.from_numpy(pic).permute(2, 0, 1).contiguous()
        else:
            # PIL input: decode the raw byte buffer, reshape to HWC,
            # then move channels first.
            tensor = torch.ByteTensor(
                torch.ByteStorage.from_buffer(
                    pic.tobytes()))
            tensor = tensor.view(pic.size[1], pic.size[0], len(pic.mode))
            # yikes, this transpose takes 80% of the loading time/CPU
            tensor = tensor.transpose(0, 1).transpose(0, 2).contiguous()
        as_float = tensor.float()
        return as_float.div(255) if self.div else as_float
|
| 766 |
+
|
| 767 |
+
|
| 768 |
+
class IdentityTransform(object):
    """No-op transform: hands back its input unchanged."""

    def __call__(self, data):
        return data
|
VLMEvalKit-sudoku/vlmeval/dataset/GUI/__init__.py
ADDED
|
File without changes
|
VLMEvalKit-sudoku/vlmeval/dataset/GUI/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (152 Bytes). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/dataset/GUI/__pycache__/screenspot_pro.cpython-310.pyc
ADDED
|
Binary file (15.7 kB). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/dataset/GUI/__pycache__/screenspot_v2.cpython-310.pyc
ADDED
|
Binary file (6.35 kB). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/dataset/GUI/screenspot.py
ADDED
|
@@ -0,0 +1,461 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import re
|
| 3 |
+
import tempfile
|
| 4 |
+
import itertools
|
| 5 |
+
from functools import partial
|
| 6 |
+
|
| 7 |
+
import pandas as pd
|
| 8 |
+
import ast
|
| 9 |
+
|
| 10 |
+
from ..image_base import ImageBaseDataset, img_root_map
|
| 11 |
+
from ..utils import build_judge, DEBUG_MESSAGE
|
| 12 |
+
from ...smp import *
|
| 13 |
+
from ...utils import track_progress_rich
|
| 14 |
+
from ipdb import set_trace as st
|
| 15 |
+
|
| 16 |
+
logger = get_logger("RUN")
|
| 17 |
+
|
| 18 |
+
"""
|
| 19 |
+
{
|
| 20 |
+
"img_filename": "web_3b0ad239-da6b-4f6f-8f12-f674dc90ff33.png",
|
| 21 |
+
"bbox": [42, 1102, 197, 70],
|
| 22 |
+
"question": "view the details of the item",
|
| 23 |
+
"data_type": "text",
|
| 24 |
+
"data_source": "shop"
|
| 25 |
+
},
|
| 26 |
+
{
|
| 27 |
+
"img_filename": "web_3b0ad239-da6b-4f6f-8f12-f674dc90ff33.png",
|
| 28 |
+
"bbox": [93, 74, 86, 132],
|
| 29 |
+
"question": "view the previous photo",
|
| 30 |
+
"data_type": "icon",
|
| 31 |
+
"data_source": "shop"
|
| 32 |
+
}
|
| 33 |
+
"""
|
| 34 |
+
|
| 35 |
+
SYSTEM_PROMPT = """You are a GUI agent. You are given a task and a screenshot of the screen. You need to perform pyautogui click/moveTo action to complete the task. The answer format is `pyautogui.click(x=?, y=?), x and y is necessary`""" # noqa: E501
|
| 36 |
+
|
| 37 |
+
USER_INSTRUCTION = """Please complete the following tasks by clicking using `pyautogui.click`:\n{instruction}"""
|
| 38 |
+
|
| 39 |
+
SYSTEM_PROMPT_V2 = """You are a GUI agent. You are given a screenshot of the screen and the description of a target element. You need to click the target element using `pyautogui.click`. The answer format is `pyautogui.click(x=?, y=?), x and y is necessary`""" # noqa: E501
|
| 40 |
+
USER_INSTRUCTION_V2 = """Please click the following target element using `pyautogui.click`:\n{description}"""
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def parse_bbox_aguvis(response):
    """Extract an (x, y) click coordinate from a model response string.

    Searches for the ``x=..., y=...`` pattern of pyautogui-style answers.
    Returns ``[x, y]`` as floats, or ``[0.0, 0.0]`` when no match is found.
    """
    coords = re.search(r"x=([\d.]+), y=([\d.]+)", response)
    if coords is None:
        return [0.0, 0.0]
    return [float(coords.group(1)), float(coords.group(2))]
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def compute_iou(box1, box2):
    """
    Compute the Intersection over Union (IoU) of two bounding boxes.

    Parameters:
    - box1 (list of float): Bounding box [x_min, y_min, x_max, y_max].
    - box2 (list of float): Bounding box [x_min, y_min, x_max, y_max].

    Returns:
    - float: IoU of box1 and box2; 0.0 when both boxes are degenerate
      (zero-area union).
    """
    # Determine the coordinates of the intersection rectangle
    x_left = max(box1[0], box2[0])
    y_top = max(box1[1], box2[1])
    x_right = min(box1[2], box2[2])
    y_bottom = min(box1[3], box2[3])

    # Compute the area of intersection (clamped at 0 for disjoint boxes)
    intersection_area = max(0, x_right - x_left) * max(0, y_bottom - y_top)

    # Compute the area of both bounding boxes
    box1_area = (box1[2] - box1[0]) * (box1[3] - box1[1])
    box2_area = (box2[2] - box2[0]) * (box2[3] - box2[1])

    # Compute the area of the union
    union_area = box1_area + box2_area - intersection_area

    # Guard: two degenerate (zero-area) boxes previously raised
    # ZeroDivisionError here; treat them as non-overlapping.
    if union_area == 0:
        return 0.0

    return intersection_area / union_area
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def compute_accuracy(box1, box2, threshold=0.5):
    """Decide whether two boxes overlap enough to count as correct.

    Parameters:
    - box1 (list of float): Bounding box [x_min, y_min, x_max, y_max].
    - box2 (list of float): Bounding box [x_min, y_min, x_max, y_max].
    - threshold (float): Minimum IoU for the prediction to be correct.

    Returns:
    - bool: True when IoU(box1, box2) >= threshold.
    """
    return compute_iou(box1, box2) >= threshold
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def compute_center_accuracy(box1, box2):
    """Check whether the center point of ``box2`` lies inside ``box1``.

    Parameters:
    - box1 (list of float): Bounding box [x_min, y_min, x_max, y_max].
    - box2 (list of float): Bounding box [x_min, y_min, x_max, y_max].

    Returns:
    - bool: True if box2's center falls within box1, False otherwise.
    """
    center_x = (box2[0] + box2[2]) / 2
    center_y = (box2[1] + box2[3]) / 2
    inside_horizontally = box1[0] <= center_x <= box1[2]
    inside_vertically = box1[1] <= center_y <= box1[3]
    return inside_horizontally and inside_vertically
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def convert_bbox(bbox, image_path):
    """Convert an (x, y, w, h) bounding box to normalized corner format.

    Parameters:
    - bbox: list of 4 numbers (x, y, w, h), or its string repr.
    - image_path: path to the screenshot, used only to read pixel size.

    Returns:
    - list of float: [x1, y1, x2, y2] scaled into [0, 1] by image size.
    """
    new_bbox = bbox if isinstance(bbox, list) else ast.literal_eval(bbox)
    new_bbox = [
        new_bbox[0],
        new_bbox[1],
        new_bbox[0] + new_bbox[2],
        new_bbox[1] + new_bbox[3],
    ]
    # Context manager releases the image file handle promptly; the previous
    # bare Image.open leaked one handle per call.
    with Image.open(image_path) as image:
        img_size = image.size
    new_bbox = [
        new_bbox[0] / img_size[0],
        new_bbox[1] / img_size[1],
        new_bbox[2] / img_size[0],
        new_bbox[3] / img_size[1],
    ]
    return new_bbox
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
class ScreenSpot(ImageBaseDataset):
    """ScreenSpot / ScreenSpot-v2 GUI-grounding benchmark dataset.

    Each sample pairs a screenshot with an instruction (or element
    description) and a ground-truth bbox; the model answers with a
    ``pyautogui.click(x=?, y=?)`` action that is scored against the bbox.
    """

    MODALITY = "IMAGE"
    TYPE = "GUI"
    # Remote TSVs for the "functional" referring-expression variant.
    DATASET_URL = {
        "ScreenSpot_Mobile": "http://opencompass.openxlab.space/utils/benchmarks/GUI/ScreenSpot/ScreenSpot_Mobile.tsv",  # noqa
        "ScreenSpot_Desktop": "http://opencompass.openxlab.space/utils/benchmarks/GUI/ScreenSpot/ScreenSpot_Desktop.tsv",  # noqa
        "ScreenSpot_Web": "http://opencompass.openxlab.space/utils/benchmarks/GUI/ScreenSpot/ScreenSpot_Web.tsv",  # noqa
        "ScreenSpot_v2_Mobile": "http://opencompass.openxlab.space/utils/benchmarks/GUI/ScreenSpot_v2/ScreenSpot_v2_Mobile.tsv",  # noqa
        "ScreenSpot_v2_Desktop": "http://opencompass.openxlab.space/utils/benchmarks/GUI/ScreenSpot_v2/ScreenSpot_v2_Desktop.tsv",  # noqa
        "ScreenSpot_v2_Web": "http://opencompass.openxlab.space/utils/benchmarks/GUI/ScreenSpot_v2/ScreenSpot_v2_Web.tsv",  # noqa
    }  # path
    # Local JSONs for the alternative ("composite") referring expressions.
    # NOTE(review): "$WORK_DIR" looks like a placeholder that is never
    # expanded here — confirm these paths are rewritten before use.
    DATASET_URL_V2 = {
        "ScreenSpot_Mobile": "$WORK_DIR/screenspot_mobile_ug.json",
        "ScreenSpot_Desktop": "$WORK_DIR/screenspot_desktop_ug.json",
        "ScreenSpot_Web": "$WORK_DIR/screenspot_web_ug.json",
    }  # path
    DATASET_MD5 = {
        "ScreenSpot_Mobile": "a5b5299843a75c9b9574c47bc13b2c53",
        "ScreenSpot_Desktop": "e6e7bac21b6b2475276404fce2458132",
        "ScreenSpot_Web": "e51d168c14b8582427cf3107d236cfc5",
        "ScreenSpot_v2_Mobile": "234c858ab4f0e787e8388a73df65a4b7",
        "ScreenSpot_v2_Desktop": "5f2aa2a497327bd33b2512a0c75cf994",
        "ScreenSpot_v2_Web": "01cd0877ee1b735a6d5190b053ba9482",
    }
    EVAL_TYPE = "point"  # point or rectangle
    RE_TYPE = "functional"  # type of referring expressions: functional or composite

    def __init__(
        self,
        dataset="ScreenSpot_Mobile",
        skip_noimg=True,
        skeleton=False,
        re_type="functional",
    ):
        """Load the dataset TSV/JSON and resolve base64 image references.

        skeleton=True skips data loading entirely (paths only); skip_noimg
        drops rows whose image field is NaN.
        """
        # st()
        ROOT = LMUDataRoot()
        # You can override this variable to save image files to a different directory
        self.dataset_name = dataset
        self.img_root = osp.join(ROOT, "images", self.dataset_name)
        self.RE_TYPE = re_type
        if skeleton:
            return

        data = self.load_data(dataset)
        self.skip_noimg = skip_noimg
        if skip_noimg and "image" in data:
            data = data[~pd.isna(data["image"])]

        self.meta_only = True
        self.parse_response_func = parse_bbox_aguvis  # TODO: parse function can be specified through kwargs when initializing the dataset  # noqa: E501

        # The image field can store the base64 encoded image or another question index (for saving space)
        if "image" in data:
            data["image"] = [str(x) for x in data["image"]]
            image_map = {x: y for x, y in zip(data["index"], data["image"])}
            # Entries <= 64 chars are treated as references to another index
            # rather than real base64 payloads; dereference them once.
            for k in image_map:
                if len(image_map[k]) <= 64:
                    idx = image_map[k]
                    assert idx in image_map and len(image_map[idx]) > 64
                    image_map[k] = image_map[idx]

            images = [toliststr(image_map[k]) for k in data["index"]]
            data["image"] = [x[0] if len(x) == 1 else x for x in images]
            self.meta_only = False

        self.data = data

    def prepare_tsv(self, url, file_md5=None):
        """Fetch the dataset table: remote TSV for 'functional' REs,
        local JSON (DATASET_URL_V2) otherwise."""
        # st()
        if self.RE_TYPE == "functional":
            return super().prepare_tsv(url=url, file_md5=file_md5)
        else:
            data_path = self.DATASET_URL_V2[self.dataset_name]
            return pd.DataFrame(load(data_path))

    @classmethod
    def get_action_space(self):
        # GUI grounding has no discrete action space; return an empty string.
        # (Declared @classmethod, so `self` here is actually the class.)
        return ""

    @classmethod
    def get_trajectory(self, line):
        """Build a minimal trajectory dict (just the task text) for a row.

        NOTE(review): as a @classmethod this reads the class-level RE_TYPE,
        so a per-instance re_type override set in __init__ is not seen here
        — confirm this is intended.
        """
        traj_dict = {}
        if self.RE_TYPE == "functional":
            traj_dict["task"] = line["question"]
        else:
            traj_dict["task"] = line["description"]
        return traj_dict

    def build_prompt(self, line):
        """Assemble the multimodal message list (system + image + text)
        for one sample; `line` may be a row index or a row."""
        # st()
        if isinstance(line, int):
            line = self.data.iloc[line]
        tgt_path = self.dump_image(line)

        if self.RE_TYPE == "functional":
            user_instruction = USER_INSTRUCTION.format(instruction=line["question"])
        else:
            user_instruction = USER_INSTRUCTION_V2.format(
                description=line["description"]
            )

        msgs = []
        # add system prompt
        if self.RE_TYPE == "functional":
            msgs.append(dict(role="system", type="text", value=SYSTEM_PROMPT))
        else:
            msgs.append(dict(role="system", type="text", value=SYSTEM_PROMPT_V2))
        if isinstance(tgt_path, list):
            msgs.extend([dict(type="image", value=p) for p in tgt_path])
        else:
            # NOTE(review): this branch REPLACES msgs, dropping the system
            # prompt appended above — confirm whether that is intentional.
            msgs = [dict(type="image", value=tgt_path)]
        msgs.append(dict(type="text", value=user_instruction))
        return msgs

    def evaluate(self, eval_file, **judge_kwargs):
        """Dispatch to point- or rectangle-based evaluation per EVAL_TYPE.
        (Implicitly returns None for any other EVAL_TYPE value.)"""
        # st()
        if self.EVAL_TYPE == "point":
            return self.evaluate_point(eval_file, **judge_kwargs)

        elif self.EVAL_TYPE == "rectangle":
            return self.evaluate_rectangle(eval_file, **judge_kwargs)

    def evaluate_rectangle(self, eval_file, **judge_kwargs):
        """Score predictions as rectangles against GT boxes using IoU-based
        metrics (IoU, ACC@k thresholds, center accuracy); writes a _score
        JSON next to eval_file and returns the aggregated dict."""
        scorers = {
            "IoU": compute_iou,
            "ACC@0.1": lambda x, y: compute_accuracy(x, y, 0.1),
            "ACC@0.3": lambda x, y: compute_accuracy(x, y, 0.3),
            "ACC@0.5": lambda x, y: compute_accuracy(x, y, 0.5),
            "ACC@0.7": lambda x, y: compute_accuracy(x, y, 0.7),
            "ACC@0.9": lambda x, y: compute_accuracy(x, y, 0.9),
            "Center_ACC": compute_center_accuracy,
        }
        # Per-metric score lists, also split by text/icon element type.
        results_dict = {}
        for key in scorers.keys():
            results_dict.update(
                {
                    key: [],
                    key + "_text": [],
                    key + "_icon": [],
                }
            )

        result = []
        data = load(eval_file)

        assert "bbox" in data and "prediction" in data
        lt = len(data)
        lines = [data.iloc[i] for i in range(lt)]
        for i in tqdm(range(len(lines))):
            line = lines[i]
            bbox = convert_bbox(
                line["bbox"], os.path.join(self.img_root, line["image_path"])
            )
            prediction = str(line["prediction"])
            # NOTE(review): bare except hides real errors; narrowing to the
            # expected parse/score exceptions would be safer.
            try:
                click_point = parse_bbox_aguvis(prediction)

                match = {}
                for score_key, score_value in scorers.items():
                    score = score_value(bbox, click_point)
                    if score_key != "IoU":
                        # e.g. "ACC@0.5" -> "match@0.5"
                        match[score_key.replace("ACC", "match")] = score
                    results_dict[score_key].append(score)
                    if line["data_type"] == "text":
                        results_dict[score_key + "_text"].append(score)
                    else:
                        results_dict[score_key + "_icon"].append(score)
            except:
                click_point = None
                # NOTE(review): failure branch uses the raw score_key names,
                # unlike the "match@..." keys above — the two branches
                # produce differently-keyed dicts; confirm intended.
                match = {score_key: False for score_key in scorers.keys() if score_key != "IoU"}
            result.append(
                {
                    "img_path": os.path.join(self.img_root, line["image_path"]),
                    "text": line["question"],
                    "bbox": line["bbox"],
                    "parsed_bbox": bbox,
                    "type": line["data_type"],
                    "source": line["data_source"],
                    "pred": click_point,
                    "num_matched": sum(match.values()),
                    **match,
                }
            )
        # Collapse each score list to its mean (stringified for the report).
        for key in results_dict:
            if len(results_dict[key]) == 0:
                results_dict[key] = str(0)
            else:
                results_dict[key] = str(sum(results_dict[key]) / len(results_dict[key]))
        score_pth = get_intermediate_file_path(eval_file, '_score', 'json')
        dump(results_dict, score_pth)

        # Optionally dump failure cases for inspection.
        failure_cases_path = os.environ.get("FAILURE_CASES_PATH", None)
        if failure_cases_path is not None:
            # NOTE(review): result entries built above never contain "match"
            # or "is_wrong_format" keys (evaluate_point's entries do), so
            # this filter would raise KeyError when the env var is set —
            # confirm and fix before relying on this path.
            failure_cases = [res for res in result if not res["match"] and res["is_wrong_format"]]
            failure_cases.sort(key=lambda r: r["num_matched"], reverse=True)

            with open(failure_cases_path, "w") as f:
                json.dump(failure_cases, f, indent=4, ensure_ascii=False)
        return results_dict

    def evaluate_point(self, eval_file, **judge_kwargs):
        """Score predictions as click points: a hit is a predicted point
        falling inside the (normalized) GT bbox.  Writes a _score JSON and
        returns accuracy/format-error aggregates."""
        # -1: format_err, 0: wrong, 1: correct
        stats = defaultdict(list)
        # Will include instance-level results
        result = []

        data = load(eval_file)
        assert "bbox" in data and "prediction" in data
        lt = len(data)
        lines = [data.iloc[i] for i in range(lt)]
        for i in tqdm(range(len(lines))):
            line = lines[i]
            bbox = (
                line["bbox"]
                if isinstance(line["bbox"], list)
                else ast.literal_eval(line["bbox"])
            )
            # The format of bbox is (x1, y1, w, h)
            x1, y1, w, h = bbox
            bbox = (x1, y1, x1 + w - 1, y1 + h - 1)

            # NOTE(review): image opened only for its size and never closed
            # — leaks one file handle per row.
            image = Image.open(os.path.join(self.img_root, line["image_path"]))
            img_size = image.size

            def make_safe(value):
                if value == -1:
                    # we can tolerate -1 as a special value and nomalize it to 0
                    return 0
                else:
                    return value

            # Normalize bbox corners into [0, 1] by image dimensions.
            bbox = [
                make_safe(bbox[0]) / img_size[0],
                make_safe(bbox[1]) / img_size[1],
                make_safe(bbox[2]) / img_size[0],
                make_safe(bbox[3]) / img_size[1],
            ]

            if any([x < 0 or x > 1 for x in bbox]):
                raise ValueError(f"bbox out of range: {bbox} | {line['bbox']} | {img_size}")

            # Bucket stats by "category:data_type" when a category exists.
            key = line['data_type'] if 'category' not in line else line['category'] + ":" + line['data_type']
            prediction = str(line["prediction"])
            try:
                click_point = parse_bbox_aguvis(prediction)
                # Do Normalization By Default
                if click_point[0] > 1 or click_point[1] > 1:
                    click_point = (click_point[0] / img_size[0], click_point[1] / img_size[1])

                match = (bbox[0] <= click_point[0] <= bbox[2]) and \
                    (bbox[1] <= click_point[1] <= bbox[3])

                if match:
                    stats[key].append(1)
                else:
                    stats[key].append(0)
                is_wrong_format = False

            except Exception as e:
                logger.warning(f"exception in screenspot eval:{e}")
                stats[key].append(-1)
                match, is_wrong_format, click_point = False, True, None

            result.append(
                {
                    "img_path": os.path.join(self.img_root, line["image_path"]),
                    "text": line["question"],
                    "bbox": line["bbox"],
                    "parsed_bbox": bbox,
                    "type": line["data_type"],
                    "source": line["data_source"],
                    "match": match,
                    "is_wrong_format": is_wrong_format,
                    "pred": click_point,
                }
            )

        final_score_dict = {}
        # Record the number of each category
        final_score_dict.update({k + ':cnt': len(stats[k]) for k in stats})
        # Calculate the Overall stats
        full_stats = []
        for v in stats.values():
            full_stats.extend(v)
        final_score_dict['Overall_Accuracy'] = np.mean([x > 0 for x in full_stats]) * 100
        final_score_dict['Format_Err_Rate'] = np.mean([x < 0 for x in full_stats]) * 100
        # Calculate the Accuracy of Text / Icon
        # NOTE(review): `[v ... for x in v]` yields each list v len(v) times
        # before chaining, so every element is counted len(v) times — the
        # per-bucket means are unchanged but buckets are weighted by size
        # squared in the combined mean; confirm whether `[x ...]` was meant.
        text_stats = [v for k, v in stats.items() if k.endswith('text') for x in v]
        text_stats = itertools.chain(*text_stats)
        final_score_dict['Text_Accuracy'] = np.mean([x > 0 for x in text_stats]) * 100
        icon_stats = [v for k, v in stats.items() if k.endswith('icon') for x in v]
        icon_stats = itertools.chain(*icon_stats)
        final_score_dict['Icon_Accuracy'] = np.mean([x > 0 for x in icon_stats]) * 100
        # Calculate the Accuracy of Each Category
        if 'category' in data:
            cates = list(set(data['category']))
            for c in cates:
                sub_stats = [v for k, v in stats.items() if k.split(":")[0] == c for x in v]
                sub_stats = itertools.chain(*sub_stats)
                final_score_dict[c + '_Accuracy'] = np.mean([x > 0 for x in sub_stats]) * 100

        score_pth = get_intermediate_file_path(eval_file, '_score', 'json')
        dump(final_score_dict, score_pth)

        # Optional failure-case dump, ordered worst-first by how far the
        # click landed outside the GT box.
        failure_cases_path = os.environ.get("FAILURE_CASES_PATH", None)
        if failure_cases_path is not None:
            def click_distance(bbox, click_point):
                # Euclidean distance from the click to the nearest point of
                # the box (0 when the click is inside).
                x, y = click_point
                x1, y1, x2, y2 = bbox
                xc, yc = (x1 + x2) / 2, (y1 + y2) / 2
                w, h = x2 - x1, y2 - y1
                abs_shift_to_center = [abs(x - xc), abs(y - yc)]  # noqa: E501
                width_outside, height_outside = [max(0, abs_shift_to_center[0] - w / 2), max(0, abs_shift_to_center[1] - h / 2)]  # noqa: E501
                return (width_outside ** 2 + height_outside ** 2) ** 0.5  # noqa: E501

            wrong_format_result = [res for res in result if res["is_wrong_format"]]
            missed_result = [res for res in result if not res["match"] and not res["is_wrong_format"]]
            missed_result.sort(key=lambda r: click_distance(r["parsed_bbox"], r["pred"]), reverse=True)
            failure_cases = wrong_format_result + missed_result

            with open(failure_cases_path, "w") as f:
                json.dump(failure_cases, f, indent=4, ensure_ascii=False)
        return final_score_dict
|
VLMEvalKit-sudoku/vlmeval/dataset/GUI/screenspot_pro.py
ADDED
|
@@ -0,0 +1,460 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import re
|
| 3 |
+
import tempfile
|
| 4 |
+
import itertools
|
| 5 |
+
from functools import partial
|
| 6 |
+
|
| 7 |
+
import pandas as pd
|
| 8 |
+
import ast
|
| 9 |
+
|
| 10 |
+
from ..image_base import ImageBaseDataset, img_root_map
|
| 11 |
+
from ..utils import build_judge, DEBUG_MESSAGE
|
| 12 |
+
from ...smp import *
|
| 13 |
+
from ...utils import track_progress_rich
|
| 14 |
+
from ipdb import set_trace as st
|
| 15 |
+
|
| 16 |
+
logger = get_logger("RUN")
|
| 17 |
+
|
| 18 |
+
"""
|
| 19 |
+
{
|
| 20 |
+
"img_filename": "web_3b0ad239-da6b-4f6f-8f12-f674dc90ff33.png",
|
| 21 |
+
"bbox": [42, 1102, 197, 70],
|
| 22 |
+
"question": "view the details of the item",
|
| 23 |
+
"data_type": "text",
|
| 24 |
+
"data_source": "shop"
|
| 25 |
+
},
|
| 26 |
+
{
|
| 27 |
+
"img_filename": "web_3b0ad239-da6b-4f6f-8f12-f674dc90ff33.png",
|
| 28 |
+
"bbox": [93, 74, 86, 132],
|
| 29 |
+
"question": "view the previous photo",
|
| 30 |
+
"data_type": "icon",
|
| 31 |
+
"data_source": "shop"
|
| 32 |
+
}
|
| 33 |
+
"""
|
| 34 |
+
|
| 35 |
+
SYSTEM_PROMPT = """You are a GUI agent. You are given a task and a screenshot of the screen. You need to perform pyautogui click/moveTo action to complete the task. The answer format is `pyautogui.click(x=?, y=?), x and y is necessary`""" # noqa: E501
|
| 36 |
+
|
| 37 |
+
USER_INSTRUCTION = """Please complete the following tasks by clicking using `pyautogui.click`:\n{instruction}""" # noqa: E501
|
| 38 |
+
|
| 39 |
+
SYSTEM_PROMPT_V2 = """You are a GUI agent. You are given a screenshot of the screen and the description of a target element. You need to click the target element using `pyautogui.click`. The answer format is `pyautogui.click(x=?, y=?), x and y is necessary`""" # noqa: E501
|
| 40 |
+
USER_INSTRUCTION_V2 = """Please click the following target element using `pyautogui.click`:\n{description}"""
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def parse_bbox_aguvis(response):
    """Parse the predicted click point from a pyautogui-style answer.

    Returns ``[x, y]`` as floats, defaulting to ``[0.0, 0.0]`` when the
    ``x=..., y=...`` pattern is absent from the response.
    """
    m = re.search(r"x=([\d.]+), y=([\d.]+)", response)
    return [float(m.group(1)), float(m.group(2))] if m else [0.0, 0.0]
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def compute_iou(box1, box2):
    """
    Compute the Intersection over Union (IoU) of two bounding boxes.

    Parameters:
    - box1 (list of float): Bounding box [x_min, y_min, x_max, y_max].
    - box2 (list of float): Bounding box [x_min, y_min, x_max, y_max].

    Returns:
    - float: IoU of box1 and box2; 0.0 when the union has zero area
      (both boxes degenerate).
    """
    # Determine the coordinates of the intersection rectangle
    x_left = max(box1[0], box2[0])
    y_top = max(box1[1], box2[1])
    x_right = min(box1[2], box2[2])
    y_bottom = min(box1[3], box2[3])

    # Compute the area of intersection (0 when the boxes do not overlap)
    intersection_area = max(0, x_right - x_left) * max(0, y_bottom - y_top)

    # Compute the area of both bounding boxes
    box1_area = (box1[2] - box1[0]) * (box1[3] - box1[1])
    box2_area = (box2[2] - box2[0]) * (box2[3] - box2[1])

    # Compute the area of the union
    union_area = box1_area + box2_area - intersection_area

    # Guard against ZeroDivisionError for two zero-area boxes.
    if union_area == 0:
        return 0.0

    return intersection_area / union_area
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def compute_accuracy(box1, box2, threshold=0.5):
    """Judge a predicted box against ground truth by IoU threshold.

    Parameters:
    - box1 (list of float): Bounding box [x_min, y_min, x_max, y_max].
    - box2 (list of float): Bounding box [x_min, y_min, x_max, y_max].
    - threshold (float): IoU cutoff for counting the prediction correct.

    Returns:
    - bool: True when the IoU reaches the threshold.
    """
    overlap = compute_iou(box1, box2)
    return overlap >= threshold
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def compute_center_accuracy(box1, box2):
    """Test whether ``box2``'s center point is contained in ``box1``.

    Parameters:
    - box1 (list of float): Bounding box [x_min, y_min, x_max, y_max].
    - box2 (list of float): Bounding box [x_min, y_min, x_max, y_max].

    Returns:
    - bool: True if the center of box2 lies within box1.
    """
    center = ((box2[0] + box2[2]) / 2, (box2[1] + box2[3]) / 2)
    return box1[0] <= center[0] <= box1[2] and box1[1] <= center[1] <= box1[3]
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def convert_bbox(bbox, image_path, convert_xywh_to_x1y1x2y2=True):
    """
    Parse and normalize a bounding box against its image size.

    Parameters:
    - bbox (list | str): Box as a list or a string literal of a list.
    - image_path (str): Path of the image the box refers to.
    - convert_xywh_to_x1y1x2y2 (bool): If True, treat the input as
      (x, y, w, h) and convert it to (x1, y1, x2, y2) first.

    Returns:
    - list of float: (x1, y1, x2, y2) normalized to [0, 1] by image size.
    """
    new_bbox = bbox if isinstance(bbox, list) else ast.literal_eval(bbox)
    if convert_xywh_to_x1y1x2y2:
        new_bbox = [
            new_bbox[0],
            new_bbox[1],
            new_bbox[0] + new_bbox[2],
            new_bbox[1] + new_bbox[3],
        ]
    # Fix: use a context manager so the image file handle is closed promptly
    # (the original left it open until garbage collection).
    with Image.open(image_path) as image:
        img_size = image.size
    new_bbox = [
        new_bbox[0] / img_size[0],
        new_bbox[1] / img_size[1],
        new_bbox[2] / img_size[0],
        new_bbox[3] / img_size[1],
    ]
    return new_bbox
|
| 140 |
+
class ScreenSpot_Pro(ImageBaseDataset):
    """GUI grounding benchmark dataset (ScreenSpot-Pro).

    Loads one of six per-domain TSV splits and evaluates point (or rectangle)
    predictions against ground-truth element bounding boxes.
    """

    MODALITY = "IMAGE"
    TYPE = "GUI"
    # Download URLs, one TSV per application domain.
    DATASET_URL = {
        "ScreenSpot_Pro_Development": "http://opencompass.openxlab.space/utils/benchmarks/GUI/ScreenSpot_Pro/ScreenSpot_Pro_Development.tsv",  # noqa
        "ScreenSpot_Pro_Creative": "http://opencompass.openxlab.space/utils/benchmarks/GUI/ScreenSpot_Pro/ScreenSpot_Pro_Creative.tsv",  # noqa
        "ScreenSpot_Pro_CAD": "http://opencompass.openxlab.space/utils/benchmarks/GUI/ScreenSpot_Pro/ScreenSpot_Pro_CAD.tsv",  # noqa
        "ScreenSpot_Pro_Scientific": "http://opencompass.openxlab.space/utils/benchmarks/GUI/ScreenSpot_Pro/ScreenSpot_Pro_Scientific.tsv",  # noqa
        "ScreenSpot_Pro_Office": "http://opencompass.openxlab.space/utils/benchmarks/GUI/ScreenSpot_Pro/ScreenSpot_Pro_Office.tsv",  # noqa
        "ScreenSpot_Pro_OS": "http://opencompass.openxlab.space/utils/benchmarks/GUI/ScreenSpot_Pro/ScreenSpot_Pro_OS.tsv",  # noqa
    }  # path
    # MD5 checksums used to validate the downloaded TSVs.
    DATASET_MD5 = {
        'ScreenSpot_Pro_Development': '45b93df1d5814885011d682fe1b0f959',
        'ScreenSpot_Pro_Creative': 'a15867fee82ba8cd95581895c55f03cd',
        'ScreenSpot_Pro_CAD': '0faa3bc29eba359766c3a7ca2c4d8917',
        'ScreenSpot_Pro_Scientific': 'edc2e1f2b53af5fff6480b77c4986b81',
        'ScreenSpot_Pro_Office': '8756c128cf567274c2647423ccc4eaf0',
        'ScreenSpot_Pro_OS': '49c3eaaa7df6d22475c39120fe8f1c06'
    }
    EVAL_TYPE = "point"  # point or rectangle
    RE_TYPE = "functional"  # type of referring expressions: functional or composite

    def __init__(
        self,
        dataset="ScreenSpot_Pro_Development",
        skip_noimg=True,
        skeleton=False,
        re_type="functional",
    ):
        """Load the split, drop image-less rows, and resolve image references.

        Parameters:
        - dataset: split name, a key of DATASET_URL.
        - skip_noimg: drop rows whose 'image' field is NaN.
        - skeleton: if True, skip data loading entirely (metadata-only object).
        - re_type: 'functional' (use 'question') or composite (use 'description').
        """
        # st()
        ROOT = LMUDataRoot()
        # You can override this variable to save image files to a different directory
        self.dataset_name = dataset
        self.img_root = osp.join(ROOT, "images", self.dataset_name)
        self.RE_TYPE = re_type
        if skeleton:
            return

        data = self.load_data(dataset)
        self.skip_noimg = skip_noimg
        if skip_noimg and "image" in data:
            data = data[~pd.isna(data["image"])]

        # Re-index rows 1..N as strings (keys into image_map below).
        data["index"] = [str(idx + 1) for idx, x in enumerate(data["bbox"])]

        self.meta_only = True
        self.parse_response_func = parse_bbox_aguvis  # TODO: parse function can be specified through kwargs when initializing the dataset # noqa: E501

        # The image field can store the base64 encoded image or another question index (for saving space) # noqa: E501
        if "image" in data:
            data["image"] = [str(x) for x in data["image"]]
            image_map = {x: y for x, y in zip(data["index"], data["image"])}
            # Values of <= 64 chars are treated as references to another row's
            # index rather than base64 payloads; dereference them.
            for k in image_map:
                if len(image_map[k]) <= 64:
                    idx = image_map[k]
                    assert idx in image_map and len(image_map[idx]) > 64
                    image_map[k] = image_map[idx]

            images = [toliststr(image_map[k]) for k in data["index"]]
            data["image"] = [x[0] if len(x) == 1 else x for x in images]
            self.meta_only = False

        self.data = data

    @classmethod
    def get_action_space(self):
        # NOTE(review): classmethod whose first parameter is named 'self';
        # conventionally this should be 'cls'.
        return ""

    @classmethod
    def get_trajectory(self, line):
        """Return a one-step trajectory dict with the task text for *line*."""
        traj_dict = {}
        if self.RE_TYPE == "functional":
            traj_dict["task"] = line["question"]
        else:
            traj_dict["task"] = line["description"]
        return traj_dict

    def build_prompt(self, line):
        """Build the multimodal message list (system prompt, image(s), instruction)."""
        if isinstance(line, int):
            line = self.data.iloc[line]
        tgt_path = self.dump_image(line)

        if self.RE_TYPE == "functional":
            user_instruction = USER_INSTRUCTION.format(instruction=line["question"])
        else:
            user_instruction = USER_INSTRUCTION_V2.format(
                description=line["description"]
            )

        msgs = []
        # add system prompt
        if self.RE_TYPE == "functional":
            msgs.append(dict(role="system", type="text", value=SYSTEM_PROMPT))
        else:
            msgs.append(dict(role="system", type="text", value=SYSTEM_PROMPT_V2))
        if isinstance(tgt_path, list):
            msgs.extend([dict(type="image", value=p) for p in tgt_path])
        else:
            # NOTE(review): this reassignment discards the system message
            # appended above for the single-image case; the list branch keeps
            # it. Likely should be msgs.append(...) — confirm intent.
            msgs = [dict(type="image", value=tgt_path)]
        msgs.append(dict(type="text", value=user_instruction))
        return msgs

    def evaluate(self, eval_file, **judge_kwargs):
        """Dispatch to point or rectangle evaluation based on EVAL_TYPE.

        Returns None (implicitly) for any other EVAL_TYPE value.
        """
        # st()
        if self.EVAL_TYPE == "point":
            return self.evaluate_point(eval_file, **judge_kwargs)

        elif self.EVAL_TYPE == "rectangle":
            return self.evaluate_rectangle(eval_file, **judge_kwargs)

    def evaluate_rectangle(self, eval_file, **judge_kwargs):
        """Score predicted rectangles against GT boxes (IoU, ACC@t, Center_ACC).

        Writes a *_score.json next to eval_file and returns the score dict
        (values stringified means over all rows / text rows / icon rows).
        """
        scorers = {
            "IoU": compute_iou,
            "ACC@0.1": lambda x, y: compute_accuracy(x, y, 0.1),
            "ACC@0.3": lambda x, y: compute_accuracy(x, y, 0.3),
            "ACC@0.5": lambda x, y: compute_accuracy(x, y, 0.5),
            "ACC@0.7": lambda x, y: compute_accuracy(x, y, 0.7),
            "ACC@0.9": lambda x, y: compute_accuracy(x, y, 0.9),
            "Center_ACC": compute_center_accuracy,
        }
        results_dict = {}
        # One accumulator per metric, plus text/icon breakdowns.
        for key in scorers.keys():
            results_dict.update(
                {
                    key: [],
                    key + "_text": [],
                    key + "_icon": [],
                }
            )

        result = []
        data = load(eval_file)
        assert "bbox" in data and "prediction" in data
        lt = len(data)
        lines = [data.iloc[i] for i in range(lt)]
        for i in tqdm(range(len(lines))):
            line = lines[i]
            bbox = convert_bbox(
                line["bbox"], os.path.join(self.img_root, line["image_path"]), convert_xywh_to_x1y1x2y2=False
            )
            prediction = str(line["prediction"])
            try:
                click_point = parse_bbox_aguvis(prediction)

                match = {}
                for score_key, score_value in scorers.items():
                    score = score_value(bbox, click_point)
                    if score_key != "IoU":
                        # Per-row booleans stored under match@t keys.
                        match[score_key.replace("ACC", "match")] = score
                    results_dict[score_key].append(score)
                    if line["ui_type"] == "text":
                        results_dict[score_key + "_text"].append(score)
                    else:
                        results_dict[score_key + "_icon"].append(score)
            except:
                # NOTE(review): bare except — parse failures are silently
                # scored as all-False; consider narrowing to Exception.
                click_point = None
                match = {score_key: False for score_key in scorers.keys() if score_key != "IoU"}
            result.append(
                {
                    "img_path": os.path.join(self.img_root, line["image_path"]),
                    "text": line["question"],
                    "bbox": line["bbox"],
                    "parsed_bbox": bbox,
                    "type": line["ui_type"],
                    "source": line["application"],
                    "pred": click_point,
                    "num_matched": sum(match.values()),
                    **match,
                }
            )
        # Collapse each accumulator to its stringified mean (or "0" if empty).
        for key in results_dict:
            if len(results_dict[key]) == 0:
                results_dict[key] = str(0)
            else:
                results_dict[key] = str(sum(results_dict[key]) / len(results_dict[key]))
        score_pth = get_intermediate_file_path(eval_file, '_score', 'json')
        dump(results_dict, score_pth)

        failure_cases_path = os.environ.get("FAILURE_CASES_PATH", None)
        if failure_cases_path is not None:
            # NOTE(review): rectangle result rows contain match@t keys (and no
            # "match"/"is_wrong_format" keys), so res["match"] /
            # res["is_wrong_format"] below will raise KeyError — this branch
            # appears broken; confirm against evaluate_point's row schema.
            failure_cases = [res for res in result if not res["match"] and res["is_wrong_format"]]
            failure_cases.sort(key=lambda r: r["num_matched"], reverse=True)

            with open(failure_cases_path, "w") as f:
                json.dump(failure_cases, f, indent=4, ensure_ascii=False)
        return results_dict

    def evaluate_point(self, eval_file, **judge_kwargs):
        """Score predicted click points: a hit is a point inside the GT box.

        Per-row outcomes are bucketed per "category:ui_type" key as
        -1 (format error), 0 (miss), or 1 (hit). Writes *_score.json and
        optional failure/success case dumps; returns the score dict.
        """
        # -1: format_err, 0: wrong, 1: correct
        stats = defaultdict(list)
        # Will include instance-level results
        result = []

        data = load(eval_file)
        assert "bbox" in data and "prediction" in data
        lt = len(data)
        lines = [data.iloc[i] for i in range(lt)]
        for i in tqdm(range(len(lines))):
            line = lines[i]
            bbox = (
                line["bbox"]
                if isinstance(line["bbox"], list)
                else ast.literal_eval(line["bbox"])
            )
            # The format of bbox is (x1, y1, x2, y2)

            image = Image.open(os.path.join(self.img_root, line["image_path"]))
            img_size = image.size

            def make_safe(value):
                if value == -1:
                    # we can tolerate -1 as a special value and nomalize it to 0
                    return 0
                else:
                    return value

            # Normalize GT box to [0, 1] by image dimensions.
            bbox = [
                make_safe(bbox[0]) / img_size[0],
                make_safe(bbox[1]) / img_size[1],
                make_safe(bbox[2]) / img_size[0],
                make_safe(bbox[3]) / img_size[1],
            ]

            if any([x < 0 or x > 1 for x in bbox]):
                raise ValueError(f"bbox out of range: {bbox} | {line['bbox']} | {img_size}")

            key = line["category"] + ":" + line['ui_type']
            prediction = str(line["prediction"])
            try:
                click_point = self.parse_response_func(prediction)
                # Do Normalization By Default
                if click_point[0] > 1 or click_point[1] > 1:
                    click_point = (click_point[0] / img_size[0], click_point[1] / img_size[1])

                match = (bbox[0] <= click_point[0] <= bbox[2]) and \
                    (bbox[1] <= click_point[1] <= bbox[3])

                if match:
                    stats[key].append(1)
                else:
                    stats[key].append(0)
                is_wrong_format = False

            except Exception as e:
                logger.warning(f"exception in screenspot eval:{e}")
                stats[key].append(-1)
                match, is_wrong_format, click_point = False, True, None

            result.append(
                {
                    "img_path": os.path.join(self.img_root, line["image_path"]),
                    "text": line["question"],
                    "bbox": line["bbox"],
                    "parsed_bbox": bbox,
                    "type": line["ui_type"],
                    "source": line["application"],
                    "match": match,
                    "is_wrong_format": is_wrong_format,
                    "pred": click_point,
                }
            )

        final_score_dict = {}
        # Record the number of each category
        final_score_dict.update({k + ':cnt': len(stats[k]) for k in stats})
        # Calculate the Overall stats
        full_stats = []
        for v in stats.values():
            full_stats.extend(v)
        final_score_dict['Overall_Accuracy'] = np.mean([x > 0 for x in full_stats]) * 100
        final_score_dict['Format_Err_Rate'] = np.mean([x < 0 for x in full_stats]) * 100
        # Calculate the Accuracy of Text / Icon
        # NOTE(review): `[v for ... for x in v]` yields the whole list v once
        # per element, so after chaining each outcome is repeated len(v) times
        # — categories end up weighted quadratically by size. Likely intended
        # `[x for k, v in ... for x in v]`; confirm. Same pattern below.
        text_stats = [v for k, v in stats.items() if k.split(":")[1] == "text" for x in v]
        text_stats = itertools.chain(*text_stats)
        final_score_dict['Text_Accuracy'] = np.mean([x > 0 for x in text_stats]) * 100
        icon_stats = [v for k, v in stats.items() if k.split(":")[1] == "icon" for x in v]
        icon_stats = itertools.chain(*icon_stats)
        final_score_dict['Icon_Accuracy'] = np.mean([x > 0 for x in icon_stats]) * 100
        # Calculate the Accuracy of Each Category
        cates = list(set(data['category']))
        for c in cates:
            sub_stats = [v for k, v in stats.items() if k.split(":")[0] == c for x in v]
            sub_stats = itertools.chain(*sub_stats)
            final_score_dict[c + '_Accuracy'] = np.mean([x > 0 for x in sub_stats]) * 100

        score_pth = get_intermediate_file_path(eval_file, '_score', 'json')
        dump(final_score_dict, score_pth)

        failure_cases_path = os.environ.get("FAILURE_CASES_PATH", None)
        if failure_cases_path is not None:
            def click_distance(bbox, click_point):
                # Distance from the point to the box (0 if inside).
                x, y = click_point
                x1, y1, x2, y2 = bbox
                xc, yc = (x1 + x2) / 2, (y1 + y2) / 2
                w, h = x2 - x1, y2 - y1
                abs_shift_to_center = [abs(x - xc), abs(y - yc)]  # noqa: E501
                width_outside, height_outside = [max(0, abs_shift_to_center[0] - w / 2), max(0, abs_shift_to_center[1] - h / 2)]  # noqa: E501
                return (width_outside ** 2 + height_outside ** 2) ** 0.5  # noqa: E501

            # Format errors first, then misses ordered farthest-first.
            wrong_format_result = [res for res in result if res["is_wrong_format"]]
            missed_result = [res for res in result if not res["match"] and not res["is_wrong_format"]]
            missed_result.sort(key=lambda r: click_distance(r["parsed_bbox"], r["pred"]), reverse=True)
            failure_cases = wrong_format_result + missed_result

            with open(failure_cases_path, "w") as f:
                json.dump(failure_cases, f, indent=4, ensure_ascii=False)

        successful_cases_path = os.environ.get("SUCCESSFUL_CASES_PATH", None)
        if successful_cases_path is not None:
            def _click_distance(bbox, click_point):
                # Distance from the point to the box center.
                x, y = click_point
                x1, y1, x2, y2 = bbox
                xc, yc = (x1 + x2) / 2, (y1 + y2) / 2
                x_shift, y_shift = x - xc, y - yc
                return (x_shift ** 2 + y_shift ** 2) ** 0.5

            successful_cases = [res for res in result if res["match"]]
            successful_cases.sort(key=lambda r: _click_distance(r["parsed_bbox"], r["pred"]), reverse=True)
            with open(successful_cases_path, "w") as f:
                json.dump(successful_cases, f, indent=4, ensure_ascii=False)
        return final_score_dict
VLMEvalKit-sudoku/vlmeval/dataset/OmniDocBench/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (161 Bytes). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/dataset/OmniDocBench/data_preprocess.py
ADDED
|
@@ -0,0 +1,447 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
import unicodedata
|
| 3 |
+
from pylatexenc.latex2text import LatexNodes2Text
|
| 4 |
+
from bs4 import BeautifulSoup
|
| 5 |
+
import subprocess
|
| 6 |
+
import shutil
|
| 7 |
+
import uuid
|
| 8 |
+
import html
|
| 9 |
+
import os
|
| 10 |
+
|
| 11 |
+
def remove_markdown_fences(content):
    """Strip ```markdown opening fences and closing ``` fences from *content*."""
    without_opening = re.sub(r'^```markdown\n?', '', content, flags=re.MULTILINE)
    return re.sub(r'```\n?$', '', without_opening, flags=re.MULTILINE)
|
| 17 |
+
def replace_repeated_chars(input_str):
    """Collapse long runs of repeated characters.

    - 4 or more underscores  -> exactly 4 underscores
    - 4 or more spaces       -> a single space
    - any other non-alphanumeric character repeated 11+ times -> 4 copies
    """
    collapsed = re.sub(r'_{4,}', '____', input_str)
    collapsed = re.sub(r' {4,}', ' ', collapsed)
    return re.sub(r'([^a-zA-Z0-9])\1{10,}', r'\1\1\1\1', collapsed)
+
# Special Unicode handling
|
| 23 |
+
def fullwidth_to_halfwidth(s):
    """Convert full-width (zenkaku) characters in *s* to their half-width forms.

    The ideographic space (U+3000) maps to an ASCII space; full-width ASCII
    variants (U+FF01..U+FF5E) shift down by 0xFEE0; everything else is kept.
    """
    def _to_halfwidth(ch):
        cp = ord(ch)
        if cp == 0x3000:
            return chr(0x0020)
        if 0xFF01 <= cp <= 0xFF5E:
            return chr(cp - 0xFEE0)
        return ch

    return ''.join(_to_halfwidth(ch) for ch in s)
|
| 36 |
+
def find_special_unicode(s):
    """Describe every non-ASCII character in *s*.

    Returns a dict mapping each character (code point > 127) to a string of
    the form 'U+XXXX (<unicode category>)'.
    """
    return {
        ch: f'U+{ord(ch):04X} ({unicodedata.category(ch)})'
        for ch in s
        if ord(ch) > 127
    }
|
| 45 |
+
# # Define dictionary for Unicode character replacements
|
| 46 |
+
# unicode_replacements = {
|
| 47 |
+
# "\u00A9": r"$\copyright$", # Copyright symbol © to latex
|
| 48 |
+
# "\u00AE": r"$^\circledR$", # Registered trademark ® to latex
|
| 49 |
+
# "\u2122": r"$^\text{TM}$", # Trademark ™ to latex
|
| 50 |
+
# "\u2018": "'", # Left single quote to straight quote
|
| 51 |
+
# "\u2019": "'", # Right single quote to straight quote
|
| 52 |
+
# "\u201C": "\"", # Left double quote to straight quote
|
| 53 |
+
# "\u201D": "\"", # Right double quote to straight quote
|
| 54 |
+
# "\u2013": "-", # En dash to hyphen
|
| 55 |
+
# "\u2014": "-", # Em dash to hyphen
|
| 56 |
+
# "\u2026": "...", # Unicode ellipsis to three dots
|
| 57 |
+
# "\u2103": r"$\textdegree C$", # ℃
|
| 58 |
+
# "\u03B1": r"$\alpha$", # α
|
| 59 |
+
# "\u03B2": r"$\beta$", # β
|
| 60 |
+
# "\u03A3": r"$\Sigma$", # Σ
|
| 61 |
+
# }
|
| 62 |
+
|
| 63 |
+
# # Use regex to replace Unicode characters
|
| 64 |
+
# def replace_unicode(match):
|
| 65 |
+
# char = match.group(0)
|
| 66 |
+
# return unicode_replacements.get(char, char)
|
| 67 |
+
|
| 68 |
+
# Matches one inline math segment: either $...$ (body in group 1) or
# \( ... \) (body in group 2). Non-greedy, so the shortest segment wins.
inline_reg = re.compile(
    r'\$(.*?)\$|'
    r'\\\((.*?)\\\)',
)
|
| 73 |
+
def textblock2unicode(text):
    """Replace inline LaTeX segments in *text* with their Unicode rendering.

    Each $...$ / \\(...\\) segment whose body still contains any of
    ``\\``, ``^`` or ``_`` (after stripping simple escapes) is converted to
    plain text via pylatexenc; other segments are left unchanged.
    """
    inline_matches = inline_reg.finditer(text)
    removal_positions = []
    for match in inline_matches:
        position = [match.start(), match.end()]
        # group(1) is the $...$ body, group(2) the \(...\) body.
        content = match.group(1) if match.group(1) is not None else match.group(2)
        # print('-------- content-------', content)
        # Remove escape characters \
        # (this drops the whole two-character sequence, e.g. '\_' -> '').
        clean_content = re.sub(r'\\([\\_&%^])', '', content)

        try:
            if any(char in clean_content for char in r'\^_'):
                if clean_content.endswith('\\'):
                    # A trailing lone backslash would break the LaTeX parser.
                    clean_content += ' '
                # inline_array.append(match.group(0))
                unicode_content = LatexNodes2Text().latex_to_text(clean_content)
                removal_positions.append((position[0], position[1], unicode_content))
        except:
            # NOTE(review): bare except silently skips segments pylatexenc
            # cannot parse; consider narrowing to Exception.
            continue

    # Remove inline formulas from original text
    # Replace right-to-left so earlier offsets stay valid.
    for start, end, unicode_content in sorted(removal_positions, reverse=True):
        text = text[:start] + unicode_content.strip() + text[end:]

    return text
|
| 99 |
+
def normalized_formula(text):
    """Normalize a LaTeX math formula string for fuzzy comparison.

    Unwraps \\[...\\] display delimiters, strips \\tag/\\hspace/environment
    wrappers and a list of cosmetic commands (font switches, spacing), then
    lowercases the result. Brace arguments of the removed commands are left
    in place (plain substring removal).
    """
    # Normalize math formulas before matching
    filter_list = ['\\mathbf', '\\mathrm', '\\mathnormal', '\\mathit', '\\mathbb', '\\mathcal', '\\mathscr', '\\mathfrak', '\\mathsf', '\\mathtt',
                   '\\textbf', '\\text', '\\boldmath', '\\boldsymbol', '\\operatorname', '\\bm',
                   '\\symbfit', '\\mathbfcal', '\\symbf', '\\scriptscriptstyle', '\\notag',
                   '\\setlength', '\\coloneqq', '\\space', '\\thickspace', '\\thinspace', '\\medspace', '\\nobreakspace', '\\negmedspace',
                   # NOTE(review): '\\substackw' looks like a typo for '\\substack' — confirm.
                   '\\quad', '\\qquad', '\\enspace', '\\substackw', ' ']
    # '\\left', '\\right', '{', '}', ' ']

    # delimiter_filter
    # If a \[ ... \] display block exists, keep only its first body.
    pattern = re.compile(r"\\\[(.+?)(?<!\\)\\\]")
    match = pattern.search(text)

    if match:
        text = match.group(1).strip()

    # Drop \tag{...}, \hspace{...} and environment begin/end markers.
    tag_pattern = re.compile(r"\\tag\{.*?\}")
    text = tag_pattern.sub('', text)
    hspace_pattern = re.compile(r"\\hspace\{.*?\}")
    text = hspace_pattern.sub('', text)
    begin_pattern = re.compile(r"\\begin\{.*?\}")
    text = begin_pattern.sub('', text)
    end_pattern = re.compile(r"\\end\{.*?\}")
    text = end_pattern.sub('', text)
    col_sep = re.compile(r"\\arraycolsep.*?\}")
    text = col_sep.sub('', text)
    text = text.strip('.')

    for filter_text in filter_list:
        text = text.replace(filter_text, '')

    # text = normalize_text(delimiter_filter(text))
    # text = delimiter_filter(text)
    text = text.lower()
    return text
|
| 135 |
+
def normalized_html_table(text):
    """Normalize an HTML table string for comparison.

    Demotes <th> to <td>, unwraps <thead>/<span>, replaces <math> nodes by
    their $alttext$, strips style/size/align/class attributes and formatting
    tags, and wraps the result in a canonical
    '<html><body><table border="1" >...</table></body></html>' skeleton.
    Returns the normalized (spaced) variant; the no-space variant computed
    internally is discarded.
    """
    def process_table_html(md_i):
        """
        pred_md format edit
        """
        # NOTE: this nested helper deliberately shadows the enclosing
        # function name; the call below resolves to this inner definition.
        def process_table_html(html_content):
            soup = BeautifulSoup(html_content, 'html.parser')
            th_tags = soup.find_all('th')
            for th in th_tags:
                th.name = 'td'
            thead_tags = soup.find_all('thead')
            for thead in thead_tags:
                thead.unwrap()  # unwrap() removes the tag but keeps its contents
            math_tags = soup.find_all('math')
            for math_tag in math_tags:
                alttext = math_tag.get('alttext', '')
                alttext = f'${alttext}$'
                if alttext:
                    math_tag.replace_with(alttext)
            span_tags = soup.find_all('span')
            for span in span_tags:
                span.unwrap()
            return str(soup)

        table_res=''
        table_res_no_space=''
        # Cheap containment test tolerant of spacing and quote style.
        if '<table' in md_i.replace(" ","").replace("'",'"'):
            md_i = process_table_html(md_i)
            table_res = html.unescape(md_i).replace('\n', '')
            table_res = unicodedata.normalize('NFKC', table_res).strip()
            # Keep only the content between <table ...> and </table>.
            pattern = r'<table\b[^>]*>(.*)</table>'
            tables = re.findall(pattern, table_res, re.DOTALL | re.IGNORECASE)
            table_res = ''.join(tables)
            # table_res = re.sub('<table.*?>','',table_res)
            # Strip presentational attributes and tbody wrappers.
            table_res = re.sub('( style=".*?")', "", table_res)
            table_res = re.sub('( height=".*?")', "", table_res)
            table_res = re.sub('( width=".*?")', "", table_res)
            table_res = re.sub('( align=".*?")', "", table_res)
            table_res = re.sub('( class=".*?")', "", table_res)
            table_res = re.sub('</?tbody>',"",table_res)

            table_res = re.sub(r'\s+', " ", table_res)
            # Space-free variant; re-insert the one space needed before
            # colspan/rowspan/border attributes so the markup stays valid.
            table_res_no_space = '<html><body><table border="1" >' + table_res.replace(' ','') + '</table></body></html>'
            # table_res_no_space = re.sub(' (style=".*?")',"",table_res_no_space)
            # table_res_no_space = re.sub(r'[ ]', " ", table_res_no_space)
            table_res_no_space = re.sub('colspan="', ' colspan="', table_res_no_space)
            table_res_no_space = re.sub('rowspan="', ' rowspan="', table_res_no_space)
            table_res_no_space = re.sub('border="', ' border="', table_res_no_space)

            table_res = '<html><body><table border="1" >' + table_res + '</table></body></html>'
            # table_flow.append(table_res)
            # table_flow_no_space.append(table_res_no_space)

        return table_res, table_res_no_space

    def clean_table(input_str,flag=True):
        # Drop formatting-only tags so textual content compares cleanly.
        if flag:
            input_str = input_str.replace('<sup>', '').replace('</sup>', '')
            input_str = input_str.replace('<sub>', '').replace('</sub>', '')
            input_str = input_str.replace('<span>', '').replace('</span>', '')
            input_str = input_str.replace('<div>', '').replace('</div>', '')
            input_str = input_str.replace('<p>', '').replace('</p>', '')
            input_str = input_str.replace('<spandata-span-identity="">', '')
            input_str = re.sub('<colgroup>.*?</colgroup>','',input_str)
        return input_str

    norm_text, _ = process_table_html(text)
    norm_text = clean_table(norm_text)
    return norm_text
|
| 205 |
+
def normalized_latex_table(text):
    """Normalize a LaTeX table by converting it to HTML, then normalizing that.

    Compiles the tabular environment(s) with latexmlc into HTML and delegates
    to normalized_html_table. Returns '' on any conversion failure.
    """
    def latex_template(latex_code):
        # Minimal standalone document wrapping *latex_code* for latexmlc.
        template = r'''
\documentclass[border=20pt]{article}
\usepackage{subcaption}
\usepackage{url}
\usepackage{graphicx}
\usepackage{caption}
\usepackage{multirow}
\usepackage{booktabs}
\usepackage{color}
\usepackage{colortbl}
\usepackage{xcolor,soul,framed}
\usepackage{fontspec}
\usepackage{amsmath,amssymb,mathtools,bm,mathrsfs,textcomp}
\setlength{\parindent}{0pt}''' + \
            r'''
\begin{document}
''' + \
            latex_code + \
            r'''
\end{document}'''

        return template

    def process_table_latex(latex_code):
        # (pattern, replacement) pairs applied in order: strip spacing/color/
        # font commands, map *rule commands to \hline, simplify column specs,
        # then drop \hline itself and unwrap single-cell \multicolumn.
        SPECIAL_STRINGS= [
            ['\\\\vspace\\{.*?\\}', ''],
            ['\\\\hspace\\{.*?\\}', ''],
            ['\\\\rule\{.*?\\}\\{.*?\\}', ''],
            ['\\\\addlinespace\\[.*?\\]', ''],
            ['\\\\addlinespace', ''],
            ['\\\\renewcommand\\{\\\\arraystretch\\}\\{.*?\\}', ''],
            ['\\\\arraystretch\\{.*?\\}', ''],
            ['\\\\(row|column)?colors?\\{[^}]*\\}(\\{[^}]*\\}){0,2}', ''],
            ['\\\\color\\{.*?\\}', ''],
            ['\\\\textcolor\\{.*?\\}', ''],
            ['\\\\rowcolor(\\[.*?\\])?\\{.*?\\}', ''],
            ['\\\\columncolor(\\[.*?\\])?\\{.*?\\}', ''],
            ['\\\\cellcolor(\\[.*?\\])?\\{.*?\\}', ''],
            ['\\\\colorbox\\{.*?\\}', ''],
            ['\\\\(tiny|scriptsize|footnotesize|small|normalsize|large|Large|LARGE|huge|Huge)', ''],
            [r'\s+', ' '],
            ['\\\\centering', ''],
            ['\\\\begin\\{table\\}\\[.*?\\]', '\\\\begin{table}'],
            ['\t', ''],
            ['@{}', ''],
            ['\\\\toprule(\\[.*?\\])?', '\\\\hline'],
            ['\\\\bottomrule(\\[.*?\\])?', '\\\\hline'],
            ['\\\\midrule(\\[.*?\\])?', '\\\\hline'],
            ['p\\{[^}]*\\}', 'l'],
            ['m\\{[^}]*\\}', 'c'],
            ['\\\\scalebox\\{[^}]*\\}\\{([^}]*)\\}', '\\1'],
            ['\\\\textbf\\{([^}]*)\\}', '\\1'],
            ['\\\\textit\\{([^}]*)\\}', '\\1'],
            ['\\\\cmidrule(\\[.*?\\])?\\(.*?\\)\\{([0-9]-[0-9])\\}', '\\\\cline{\\2}'],
            ['\\\\hline', ''],
            [r'\\multicolumn\{1\}\{[^}]*\}\{((?:[^{}]|(?:\{[^{}]*\}))*)\}', r'\1']
        ]
        # Greedy (.*) on purpose: capture from the first \begin{tabular} to
        # the last \end{tabular}, including nested tabulars.
        pattern = r'\\begin\{tabular\}.*\\end\{tabular\}'
        matches = re.findall(pattern, latex_code, re.DOTALL)
        latex_code = ' '.join(matches)

        for special_str in SPECIAL_STRINGS:
            latex_code = re.sub(fr'{special_str[0]}', fr'{special_str[1]}', latex_code)

        return latex_code

    def convert_latex_to_html(latex_content, cache_dir='./temp'):
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)

        # Unique basename so parallel conversions do not clobber each other's
        # .tex/.html files within the cache dir.
        uuid_str = str(uuid.uuid1())
        with open(f'{cache_dir}/{uuid_str}.tex', 'w') as f:
            f.write(latex_template(latex_content))

        cmd = ['latexmlc', '--quiet', '--nocomments', f'--log={cache_dir}/{uuid_str}.log',
               f'{cache_dir}/{uuid_str}.tex', f'--dest={cache_dir}/{uuid_str}.html']
        try:
            subprocess.run(cmd, check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            with open(f'{cache_dir}/{uuid_str}.html', 'r') as f:
                html_content = f.read()

            # Keep only <table>...</table> fragments from latexml's output.
            pattern = r'<table\b[^>]*>(.*)</table>'
            tables = re.findall(pattern, html_content, re.DOTALL | re.IGNORECASE)
            tables = [f'<table>{table}</table>' for table in tables]
            html_content = '\n'.join(tables)

        except Exception as e:
            # Any failure (latexmlc missing, compile error) yields ''.
            html_content = ''

        # NOTE(review): this removes the ENTIRE cache_dir, not just this
        # run's uuid-named files — concurrent callers sharing './temp' will
        # delete each other's in-flight files. Consider per-call tempdirs.
        shutil.rmtree(cache_dir)
        return html_content

    html_text = convert_latex_to_html(text)
    normlized_tables = normalized_html_table(html_text)
    return normlized_tables
|
| 304 |
+
def normalized_table(text, format='html'):
    """Normalize a table string, dispatching on its markup format.

    Args:
        text: Table source, either HTML or LaTeX markup.
        format: Either ``'html'`` or ``'latex'``.

    Returns:
        The normalized table produced by the matching
        ``normalized_html_table`` / ``normalized_latex_table`` helper.

    Raises:
        ValueError: If ``format`` is not one of the supported values.
    """
    if format in ('html', 'latex'):
        handler = globals()['normalized_{}_table'.format(format)]
        return handler(text)
    raise ValueError('Invalid format: {}'.format(format))
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
def textblock_with_norm_formula(text):
    """Replace every inline formula in *text* with its normalized form.

    Inline formulas are the ``$...$`` / ``\\(...\\)`` spans matched by the
    module-level ``inline_reg``; each is run through ``normalized_formula``
    and spliced back in place of the original span.
    """
    replacements = [
        (m.start(), m.end(),
         normalized_formula(m.group(1) if m.group(1) is not None else m.group(2)))
        for m in inline_reg.finditer(text)
    ]

    # Splice from the back so earlier offsets stay valid.
    for begin, stop, normalized in sorted(replacements, reverse=True):
        text = text[:begin] + normalized.strip() + text[stop:]

    return text
|
| 327 |
+
|
| 328 |
+
# def inline_filter_unicode(text):
|
| 329 |
+
# # Ensure text is string type
|
| 330 |
+
# if not isinstance(text, str):
|
| 331 |
+
# text = str(text)
|
| 332 |
+
|
| 333 |
+
# # Convert LaTeX content to Unicode representation
|
| 334 |
+
# text = LatexNodes2Text().latex_to_text(text)
|
| 335 |
+
|
| 336 |
+
# inline_array = []
|
| 337 |
+
# inline_matches = inline_reg.finditer(text)
|
| 338 |
+
|
| 339 |
+
# for match in inline_matches:
|
| 340 |
+
# position = [match.start(), match.end()]
|
| 341 |
+
# content = match.group(1) if match.group(1) is not None else match.group(2)
|
| 342 |
+
|
| 343 |
+
# # Remove escape characters \
|
| 344 |
+
# clean_content = re.sub(r'\\([\\_&%^])', '', content)
|
| 345 |
+
|
| 346 |
+
# if any(char in clean_content for char in r'\^_'):
|
| 347 |
+
# # inline_array.append(match.group(0))
|
| 348 |
+
# inline_array.append({
|
| 349 |
+
# 'category_type': 'equation_inline',
|
| 350 |
+
# 'position': position,
|
| 351 |
+
# 'content': match.group(0),
|
| 352 |
+
# })
|
| 353 |
+
# text = text.replace(match.group(0), '')
|
| 354 |
+
# # print('-----Found inline formula: ', match.group(0))
|
| 355 |
+
# else:
|
| 356 |
+
# text = text.replace(match.group(0), content)
|
| 357 |
+
# # # Add to inline_array
|
| 358 |
+
# # inline_array.append({
|
| 359 |
+
# # 'category_type': 'equation_inline',
|
| 360 |
+
# # 'position': position,
|
| 361 |
+
# # 'content': content,
|
| 362 |
+
# # })
|
| 363 |
+
|
| 364 |
+
# # # Remove matched formula from original text, can choose to replace with spaces or remove directly
|
| 365 |
+
# # text = text[:position[0]] + ' '*(position[1]-position[0]) + text[position[1]:]
|
| 366 |
+
|
| 367 |
+
# return text, inline_array
|
| 368 |
+
|
| 369 |
+
def inline_filter_unicode(text):
    """Split inline LaTeX formulas out of a text block.

    The text is first converted to Unicode via pylatexenc (with the inline
    delimiters protected by a placeholder so they survive conversion),
    then every remaining ``$...$`` span that still contains LaTeX markup
    (``\\``, ``^`` or ``_``) is collected and removed from the original
    text.

    Returns:
        (text, inline_array): the text with formula spans removed, and a
        list of dicts (``category_type`` / ``position`` / ``content``)
        describing each extracted inline formula.
    """
    # Ensure text is string type
    if not isinstance(text, str):
        text = str(text)

    # Protect the inline-formula delimiters so latex_to_text() does not
    # consume them; they are restored as plain '$' afterwards.
    placeholder = '__INLINE_FORMULA_BOUNDARY__'
    text_copy = text.replace('$', placeholder).replace('\\(', placeholder).replace('\\)', placeholder)
    # Convert LaTeX content to Unicode representation
    text_copy = LatexNodes2Text().latex_to_text(text_copy)
    # Restore boundary markers
    text_copy = text_copy.replace(placeholder, '$')

    inline_array = []
    inline_matches = inline_reg.finditer(text_copy)
    # Record positions of inline formulas to be removed
    removal_positions = []

    for match in inline_matches:
        position = [match.start(), match.end()]
        content = match.group(1) if match.group(1) is not None else match.group(2)
        # (debug print removed — it spammed stdout on every formula)
        # Remove escape characters \
        clean_content = re.sub(r'\\([\\_&%^])', '', content)

        if any(char in clean_content for char in r'\^_'):
            inline_array.append({
                'category_type': 'equation_inline',
                'position': position,
                'content': content,
            })
            removal_positions.append((position[0], position[1]))

    # Remove inline formulas from original text.
    # NOTE(review): offsets were computed on ``text_copy`` (post pylatexenc
    # conversion), whose length can differ from ``text`` — the slicing
    # below may cut at shifted positions; confirm this is intended.
    for start, end in sorted(removal_positions, reverse=True):
        text = text[:start] + text[end:]

    return text, inline_array
|
| 411 |
+
|
| 412 |
+
def inline_filter(text):
    """Strip inline formulas from *text*, keeping the LaTeX ones aside.

    Spans matched by ``inline_reg`` that still contain markup characters
    (``\\``, ``^`` or ``_``) after unescaping are recorded in
    ``inline_array`` and deleted from the text; trivial spans are replaced
    by their bare content.

    Returns:
        (text, inline_array)
    """
    if not isinstance(text, str):
        text = str(text)

    inline_array = []
    source = text  # matches are located on the original string
    for match in inline_reg.finditer(source):
        span = [match.start(), match.end()]
        body = match.group(1) if match.group(1) is not None else match.group(2)

        # Drop escaped characters before probing for real LaTeX markup.
        unescaped = re.sub(r'\\([\\_&%^])', '', body)

        if any(ch in unescaped for ch in r'\^_'):
            inline_array.append({
                'category_type': 'equation_inline',
                'position': span,
                'content': match.group(0),
            })
            text = text.replace(match.group(0), '')
        else:
            text = text.replace(match.group(0), body)

    return text, inline_array
|
| 441 |
+
|
| 442 |
+
# Text OCR quality check processing:
|
| 443 |
+
def clean_string(input_string):
    """Keep only word characters (letters, digits, underscore) and CJK.

    Literal escape sequences ('\\t', '\\n'), real tabs/newlines and the
    '/t' / '/n' variants are stripped first, then every remaining
    non-word, non-CJK character is removed.
    """
    for junk in ('\\t', '\\n', '\t', '\n', '/t', '/n'):
        input_string = input_string.replace(junk, '')
    return re.sub(r'[^\w\u4e00-\u9fff]', '', input_string)
|
VLMEvalKit-sudoku/vlmeval/dataset/OmniDocBench/requirements.txt
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
accelerate>=0.26.0
|
| 2 |
+
apted
|
| 3 |
+
BeautifulSoup4
|
| 4 |
+
evaluate
|
| 5 |
+
func_timeout
|
| 6 |
+
jmespath
|
| 7 |
+
Levenshtein
|
| 8 |
+
lxml
|
| 9 |
+
nltk
|
| 10 |
+
pylatexenc
|
| 11 |
+
qwen_vl_utils
|
| 12 |
+
scipy
|
| 13 |
+
torchvision
|
VLMEvalKit-sudoku/vlmeval/dataset/OmniDocBench/utils.py
ADDED
|
@@ -0,0 +1,1916 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
import unicodedata
|
| 3 |
+
import subprocess
|
| 4 |
+
import shutil
|
| 5 |
+
import uuid
|
| 6 |
+
import html
|
| 7 |
+
import os
|
| 8 |
+
import sys
|
| 9 |
+
import pdb
|
| 10 |
+
import json
|
| 11 |
+
import copy
|
| 12 |
+
import unicodedata
|
| 13 |
+
|
| 14 |
+
import Levenshtein
|
| 15 |
+
import numpy as np
|
| 16 |
+
from bs4 import BeautifulSoup
|
| 17 |
+
from pylatexenc.latex2text import LatexNodes2Text
|
| 18 |
+
from scipy.optimize import linear_sum_assignment
|
| 19 |
+
from pylatexenc.latexencode import unicode_to_latex
|
| 20 |
+
from pylatexenc.latex2text import LatexNodes2Text
|
| 21 |
+
from pylatexenc.latexwalker import LatexWalker, LatexEnvironmentNode, LatexCharsNode, LatexGroupNode, LatexMacroNode, LatexSpecialsNode
|
| 22 |
+
from collections import defaultdict
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def read_md_file(filepath):
    """Read a UTF-8 encoded markdown/text file and return its contents."""
    with open(filepath, 'r', encoding='utf-8') as fh:
        return fh.read()
|
| 30 |
+
|
| 31 |
+
def save_paired_result(preds, gts, save_path):
    """Dump paired (gt, pred) results to *save_path* as a JSON list.

    Each entry gets a sequential ``img_id`` starting at 0.  ``preds`` and
    ``gts`` are paired positionally; extra items in the longer list are
    silently dropped (standard ``zip`` semantics).

    Args:
        preds: Sequence of prediction strings.
        gts: Sequence of ground-truth strings.
        save_path: Destination JSON file path.
    """
    # enumerate() replaces the original hand-rolled formula_id counter.
    save_result = [
        {"gt": gt, "pred": pred, "img_id": formula_id}
        for formula_id, (gt, pred) in enumerate(zip(gts, preds))
    ]
    with open(save_path, 'w', encoding='utf-8') as f:
        json.dump(save_result, f, indent=4, ensure_ascii=False)
|
| 43 |
+
|
| 44 |
+
def remove_markdown_fences(content):
    """Strip ```markdown opening fences and bare ``` closing fences."""
    opening = re.compile(r'^```markdown\n?', re.MULTILINE)
    closing = re.compile(r'```\n?$', re.MULTILINE)
    return closing.sub('', opening.sub('', content))
|
| 48 |
+
|
| 49 |
+
# Standardize all consecutive characters
|
| 50 |
+
def replace_repeated_chars(input_str):
    """Collapse long character runs to a bounded length.

    - 4+ consecutive underscores -> exactly 4 underscores
    - 4+ consecutive spaces      -> exactly 4 spaces
    - 11+ repeats of any other non-alphanumeric char -> 4 repeats
    """
    collapsed = re.sub(r'_{4,}', '____', input_str)
    collapsed = re.sub(r' {4,}', '    ', collapsed)
    return re.sub(r'([^a-zA-Z0-9])\1{10,}', r'\1\1\1\1', collapsed)
|
| 54 |
+
|
| 55 |
+
# Special Unicode handling
|
| 56 |
+
def fullwidth_to_halfwidth(s):
    """Map full-width (zenkaku) characters in *s* to half-width forms."""
    def _convert(char):
        code = ord(char)
        if code == 0x3000:  # ideographic space -> ASCII space
            return ' '
        if 0xFF01 <= code <= 0xFF5E:  # full-width ASCII block
            return chr(code - 0xFEE0)
        return char

    return ''.join(_convert(char) for char in s)
|
| 68 |
+
|
| 69 |
+
def find_special_unicode(s):
    """Map each non-ASCII character in *s* to 'U+XXXX (<category>)'.

    The category is the two-letter Unicode general category (e.g. 'Lo').
    Duplicate characters collapse into a single dict entry.
    """
    return {
        char: f'U+{ord(char):04X} ({unicodedata.category(char)})'
        for char in s
        if ord(char) > 127
    }
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
# Matches one inline math formula: either $...$ (body in group 1) or
# \( ... \) (body in group 2); both bodies are captured lazily.
inline_reg = re.compile(
    r'\$(.*?)\$|'
    r'\\\((.*?)\\\)',
)
|
| 83 |
+
|
| 84 |
+
def textblock2unicode(text):
    """Convert inline LaTeX formulas inside *text* to plain Unicode.

    Each ``$...$`` / ``\\(...\\)`` span that still contains LaTeX markup
    (``\\``, ``^`` or ``_``) is rendered with pylatexenc and spliced back
    into the text; spans that fail to render are left untouched.
    """
    inline_matches = inline_reg.finditer(text)
    removal_positions = []
    for match in inline_matches:
        position = [match.start(), match.end()]
        content = match.group(1) if match.group(1) is not None else match.group(2)
        # Remove escape characters \
        clean_content = re.sub(r'\\([\\_&%^])', '', content)

        try:
            if any(char in clean_content for char in r'\^_'):
                # A trailing bare backslash would make pylatexenc choke.
                if clean_content.endswith('\\'):
                    clean_content += ' '
                unicode_content = LatexNodes2Text().latex_to_text(clean_content)
                removal_positions.append((position[0], position[1], unicode_content))
        except Exception:
            # BUGFIX: was a bare ``except:`` — narrowed so that
            # KeyboardInterrupt / SystemExit are no longer swallowed.
            continue

    # Remove inline formulas from original text
    for start, end, unicode_content in sorted(removal_positions, reverse=True):
        text = text[:start] + unicode_content.strip() + text[end:]

    return text
|
| 109 |
+
|
| 110 |
+
def normalized_formula(text):
    """Normalize a LaTeX math formula for robust string matching.

    Unwraps a display-math ``\\[...\\]`` block if present, deletes
    ``\\tag`` / ``\\hspace`` / ``\\begin`` / ``\\end`` / ``\\arraycolsep``
    chunks, strips a fixed list of font and spacing commands (including
    every space character), and lower-cases the result.
    """
    # Commands removed verbatim; the trailing ' ' entry deletes all spaces.
    dropped_commands = (
        '\\mathbf', '\\mathrm', '\\mathnormal', '\\mathit', '\\mathbb',
        '\\mathcal', '\\mathscr', '\\mathfrak', '\\mathsf', '\\mathtt',
        '\\textbf', '\\text', '\\boldmath', '\\boldsymbol', '\\operatorname',
        '\\bm', '\\symbfit', '\\mathbfcal', '\\symbf', '\\scriptscriptstyle',
        '\\notag', '\\setlength', '\\coloneqq', '\\space', '\\thickspace',
        '\\thinspace', '\\medspace', '\\nobreakspace', '\\negmedspace',
        '\\quad', '\\qquad', '\\enspace', '\\substackw', ' ',
    )

    # Unwrap \[ ... \] display delimiters if the formula carries them.
    display = re.search(r"\\\[(.+?)(?<!\\)\\\]", text)
    if display:
        text = display.group(1).strip()

    # Chunks that are regex-deleted wholesale, in the original order.
    for chunk_pattern in (r"\\tag\{.*?\}", r"\\hspace\{.*?\}",
                          r"\\begin\{.*?\}", r"\\end\{.*?\}",
                          r"\\arraycolsep.*?\}"):
        text = re.sub(chunk_pattern, '', text)
    text = text.strip('.')

    for command in dropped_commands:
        text = text.replace(command, '')

    return text.lower()
|
| 145 |
+
|
| 146 |
+
def normalized_html_table(text):
    """Normalize an HTML table string for comparison.

    The input is parsed with BeautifulSoup; structural noise (``<th>`` /
    ``<thead>`` / ``<span>`` wrappers, MathML elements, styling
    attributes, ``<tbody>``, sup/sub/div/p tags, colgroups) is stripped
    and the result re-wrapped in a canonical
    ``<html><body><table border="1" >`` shell.  Inputs that contain no
    ``<table`` tag normalize to the empty string.
    """
    def _strip_with_soup(html_content):
        # One BeautifulSoup pass: th -> td, unwrap thead/span, and replace
        # <math> elements by their $alttext$ LaTeX source.
        # (Renamed: this used to shadow the enclosing function's name.)
        soup = BeautifulSoup(html_content, 'html.parser')
        for th in soup.find_all('th'):
            th.name = 'td'
        for thead in soup.find_all('thead'):
            thead.unwrap()  # unwrap() removes the tag but keeps its content
        for math_tag in soup.find_all('math'):
            alttext = math_tag.get('alttext', '')
            # BUGFIX: the emptiness check used to run *after* wrapping in
            # '$...$', so it was always true; test the raw alttext first.
            if alttext:
                math_tag.replace_with(f'${alttext}$')
        for span in soup.find_all('span'):
            span.unwrap()
        return str(soup)

    def process_table_html(md_i):
        """
        pred_md format edit
        """
        table_res = ''
        table_res_no_space = ''
        if '<table' in md_i.replace(" ", "").replace("'", '"'):
            md_i = _strip_with_soup(md_i)
            table_res = html.unescape(md_i).replace('\n', '')
            table_res = unicodedata.normalize('NFKC', table_res).strip()
            # Keep only the content between the outermost <table> tags.
            pattern = r'<table\b[^>]*>(.*)</table>'
            tables = re.findall(pattern, table_res, re.DOTALL | re.IGNORECASE)
            table_res = ''.join(tables)
            # Drop presentation-only attributes and tbody wrappers.
            table_res = re.sub('( style=".*?")', "", table_res)
            table_res = re.sub('( height=".*?")', "", table_res)
            table_res = re.sub('( width=".*?")', "", table_res)
            table_res = re.sub('( align=".*?")', "", table_res)
            table_res = re.sub('( class=".*?")', "", table_res)
            table_res = re.sub('</?tbody>', "", table_res)

            table_res = re.sub(r'\s+', " ", table_res)
            # Space-free variant, re-inserting the one space each
            # colspan/rowspan/border attribute needs to stay valid HTML.
            table_res_no_space = '<html><body><table border="1" >' + table_res.replace(' ', '') + '</table></body></html>'
            table_res_no_space = re.sub('colspan="', ' colspan="', table_res_no_space)
            table_res_no_space = re.sub('rowspan="', ' rowspan="', table_res_no_space)
            table_res_no_space = re.sub('border="', ' border="', table_res_no_space)

            table_res = '<html><body><table border="1" >' + table_res + '</table></body></html>'

        return table_res, table_res_no_space

    def clean_table(input_str, flag=True):
        # Remove inline formatting tags that carry no table structure.
        if flag:
            input_str = input_str.replace('<sup>', '').replace('</sup>', '')
            input_str = input_str.replace('<sub>', '').replace('</sub>', '')
            input_str = input_str.replace('<span>', '').replace('</span>', '')
            input_str = input_str.replace('<div>', '').replace('</div>', '')
            input_str = input_str.replace('<p>', '').replace('</p>', '')
            input_str = input_str.replace('<spandata-span-identity="">', '')
        input_str = re.sub('<colgroup>.*?</colgroup>', '', input_str)
        return input_str

    norm_text, _ = process_table_html(text)
    norm_text = clean_table(norm_text)
    return norm_text
|
| 215 |
+
|
| 216 |
+
def normalized_latex_table(text):
    """Normalize a LaTeX table by converting it to HTML.

    The input is wrapped in a minimal standalone LaTeX document, compiled
    to HTML with the external ``latexmlc`` command, and the resulting
    ``<table>`` markup is normalized with ``normalized_html_table``.
    Returns the empty string when the conversion fails.
    """
    def latex_template(latex_code):
        """Wrap *latex_code* in a minimal compilable LaTeX document."""
        template = r'''
\documentclass[border=20pt]{article}
\usepackage{subcaption}
\usepackage{url}
\usepackage{graphicx}
\usepackage{caption}
\usepackage{multirow}
\usepackage{booktabs}
\usepackage{color}
\usepackage{colortbl}
\usepackage{xcolor,soul,framed}
\usepackage{fontspec}
\usepackage{amsmath,amssymb,mathtools,bm,mathrsfs,textcomp}
\setlength{\parindent}{0pt}''' + \
            r'''
\begin{document}
''' + \
            latex_code + \
            r'''
\end{document}'''

        return template

    def process_table_latex(latex_code):
        # NOTE(review): this helper is defined but never called below —
        # presumably kept for an alternative normalization path; confirm
        # before removing.
        # [regex, replacement] pairs stripping layout-only LaTeX commands.
        SPECIAL_STRINGS = [
            ['\\\\vspace\\{.*?\\}', ''],
            ['\\\\hspace\\{.*?\\}', ''],
            ['\\\\rule\{.*?\\}\\{.*?\\}', ''],
            ['\\\\addlinespace\\[.*?\\]', ''],
            ['\\\\addlinespace', ''],
            ['\\\\renewcommand\\{\\\\arraystretch\\}\\{.*?\\}', ''],
            ['\\\\arraystretch\\{.*?\\}', ''],
            ['\\\\(row|column)?colors?\\{[^}]*\\}(\\{[^}]*\\}){0,2}', ''],
            ['\\\\color\\{.*?\\}', ''],
            ['\\\\textcolor\\{.*?\\}', ''],
            ['\\\\rowcolor(\\[.*?\\])?\\{.*?\\}', ''],
            ['\\\\columncolor(\\[.*?\\])?\\{.*?\\}', ''],
            ['\\\\cellcolor(\\[.*?\\])?\\{.*?\\}', ''],
            ['\\\\colorbox\\{.*?\\}', ''],
            ['\\\\(tiny|scriptsize|footnotesize|small|normalsize|large|Large|LARGE|huge|Huge)', ''],
            [r'\s+', ' '],
            ['\\\\centering', ''],
            ['\\\\begin\\{table\\}\\[.*?\\]', '\\\\begin{table}'],
            ['\t', ''],
            ['@{}', ''],
            ['\\\\toprule(\\[.*?\\])?', '\\\\hline'],
            ['\\\\bottomrule(\\[.*?\\])?', '\\\\hline'],
            ['\\\\midrule(\\[.*?\\])?', '\\\\hline'],
            ['p\\{[^}]*\\}', 'l'],
            ['m\\{[^}]*\\}', 'c'],
            ['\\\\scalebox\\{[^}]*\\}\\{([^}]*)\\}', '\\1'],
            ['\\\\textbf\\{([^}]*)\\}', '\\1'],
            ['\\\\textit\\{([^}]*)\\}', '\\1'],
            ['\\\\cmidrule(\\[.*?\\])?\\(.*?\\)\\{([0-9]-[0-9])\\}', '\\\\cline{\\2}'],
            ['\\\\hline', ''],
            [r'\\multicolumn\{1\}\{[^}]*\}\{((?:[^{}]|(?:\{[^{}]*\}))*)\}', r'\1']
        ]
        # Greedy .* on purpose: keep everything from the first
        # \begin{tabular} to the last \end{tabular}.
        pattern = r'\\begin\{tabular\}.*\\end\{tabular\}'
        matches = re.findall(pattern, latex_code, re.DOTALL)
        latex_code = ' '.join(matches)

        for special_str in SPECIAL_STRINGS:
            latex_code = re.sub(fr'{special_str[0]}', fr'{special_str[1]}', latex_code)

        return latex_code

    def convert_latex_to_html(latex_content, cache_dir='./temp'):
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)

        uuid_str = str(uuid.uuid1())
        # Explicit UTF-8 so the result does not depend on the locale.
        with open(f'{cache_dir}/{uuid_str}.tex', 'w', encoding='utf-8') as f:
            f.write(latex_template(latex_content))

        cmd = ['latexmlc', '--quiet', '--nocomments', f'--log={cache_dir}/{uuid_str}.log',
               f'{cache_dir}/{uuid_str}.tex', f'--dest={cache_dir}/{uuid_str}.html']
        try:
            subprocess.run(cmd, check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            with open(f'{cache_dir}/{uuid_str}.html', 'r', encoding='utf-8') as f:
                html_content = f.read()

            # Keep only the <table> elements produced by latexml.
            pattern = r'<table\b[^>]*>(.*)</table>'
            tables = re.findall(pattern, html_content, re.DOTALL | re.IGNORECASE)
            tables = [f'<table>{table}</table>' for table in tables]
            html_content = '\n'.join(tables)

        except Exception:
            # latexmlc missing or compilation failed: fall back to ''.
            html_content = ''

        shutil.rmtree(cache_dir)
        return html_content

    html_text = convert_latex_to_html(text)
    normalized_tables = normalized_html_table(html_text)
    return normalized_tables
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
def normalized_table(text, format='html'):
    """Normalize a table string, dispatching on *format*.

    Supported formats are 'html' and 'latex'; the call is forwarded to
    the module-level ``normalized_<format>_table`` helper.

    Raises:
        ValueError: if *format* is not one of the supported names.
    """
    if format in ('html', 'latex'):
        handler = globals()['normalized_{}_table'.format(format)]
        return handler(text)
    raise ValueError('Invalid format: {}'.format(format))
|
| 320 |
+
|
| 321 |
+
|
| 322 |
+
def textblock_with_norm_formula(text):
    """Replace every inline formula in *text* with its normalized form.

    Inline spans are located with ``inline_reg``; each span's content is
    run through ``normalized_formula`` and spliced back in place of the
    original delimiters.
    """
    replacements = [
        (m.start(), m.end(),
         normalized_formula(m.group(1) if m.group(1) is not None else m.group(2)))
        for m in inline_reg.finditer(text)
    ]

    # Splice right-to-left so earlier offsets remain valid.
    for begin, finish, normed in sorted(replacements, reverse=True):
        text = text[:begin] + normed.strip() + text[finish:]

    return text
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
def inline_filter_unicode(text):
    """Detect inline formulas after converting LaTeX text to Unicode.

    The formula delimiters ($, \\(, \\)) are protected with a placeholder
    while ``LatexNodes2Text`` converts the surrounding LaTeX to plain
    Unicode, then restored as '$' so ``inline_reg`` can find the spans.
    Spans that still contain LaTeX markers (backslash, ^, _) are collected
    as ``equation_inline`` items and removed from *text*.

    Returns:
        (filtered_text, inline_array) where inline_array holds dicts with
        'category_type', 'position' and 'content'.

    NOTE(review): positions are computed on the Unicode-converted copy but
    applied to the original *text* — the offsets only line up when the
    conversion does not change string length; confirm against callers.
    """
    # Ensure text is string type
    if not isinstance(text, str):
        text = str(text)

    # Protect inline formula boundary markers from the LaTeX->Unicode pass.
    placeholder = '__INLINE_FORMULA_BOUNDARY__'
    text_copy = text.replace('$', placeholder).replace('\\(', placeholder).replace('\\)', placeholder)
    # Convert LaTeX content to Unicode representation
    text_copy = LatexNodes2Text().latex_to_text(text_copy)
    # Restore boundary markers as plain '$' delimiters.
    text_copy = text_copy.replace(placeholder, '$')

    inline_array = []
    inline_matches = inline_reg.finditer(text_copy)
    # Record positions of inline formulas to be removed
    removal_positions = []

    for match in inline_matches:
        position = [match.start(), match.end()]
        # Group 1 is the $...$ capture, group 2 the \(...\) capture.
        content = match.group(1) if match.group(1) is not None else match.group(2)
        print('-------- content-------', content)
        # Remove escape characters \ so escaped specials don't count as math.
        clean_content = re.sub(r'\\([\\_&%^])', '', content)

        if any(char in clean_content for char in r'\^_'):
            inline_array.append({
                'category_type': 'equation_inline',
                'position': position,
                'content': content,
            })
            removal_positions.append((position[0], position[1]))

    # Remove inline formulas from original text, right-to-left so earlier
    # offsets remain valid.
    for start, end in sorted(removal_positions, reverse=True):
        text = text[:start] + text[end:]

    return text, inline_array
|
| 382 |
+
|
| 383 |
+
def inline_filter(text):
    """Split inline formulas out of *text*.

    Each ``inline_reg`` span whose content still contains LaTeX markers
    (backslash, ^, _) after unescaping is collected as an
    ``equation_inline`` item and deleted from the text; plain spans are
    unwrapped to their bare content.

    Returns:
        (filtered_text, inline_array)
    """
    if not isinstance(text, str):
        text = str(text)

    formulas = []
    for hit in inline_reg.finditer(text):
        span = [hit.start(), hit.end()]
        # Group 1 is the $...$ capture, group 2 the \(...\) capture.
        body = hit.group(1) if hit.group(1) is not None else hit.group(2)

        # Drop escaped specials so they don't count as LaTeX markers.
        unescaped = re.sub(r'\\([\\_&%^])', '', body)

        if any(ch in unescaped for ch in r'\^_'):
            formulas.append({
                'category_type': 'equation_inline',
                'position': span,
                'content': hit.group(0),
            })
            text = text.replace(hit.group(0), '')
        else:
            # Not really a formula: keep the content, drop the delimiters.
            text = text.replace(hit.group(0), body)

    return text, formulas
|
| 412 |
+
|
| 413 |
+
# Text OCR quality check processing:
|
| 414 |
+
def clean_string(input_string):
    """Strip tab/newline artifacts, then keep only word and CJK characters.

    Removes literal ``\\t``/``\\n`` escape sequences, real tabs/newlines
    and the common OCR typos ``/t``/``/n`` before filtering.
    """
    for junk in ('\\t', '\\n', '\t', '\n', '/t', '/n'):
        input_string = input_string.replace(junk, '')
    # Keep word characters (letters, digits, underscore) and CJK ideographs.
    return re.sub(r'[^\w\u4e00-\u9fff]', '', input_string)
|
| 419 |
+
|
| 420 |
+
def extract_tabular(text):
    """Extract balanced ``\\begin{tabular}...\\end{tabular}`` spans.

    Nesting is handled with a stack of open positions; only outermost
    spans are emitted. If an unmatched ``\\begin`` remains, extraction is
    retried recursively just past it.

    Returns:
        (codes, positions) — the extracted source strings and their
        (start, end) offsets into *text*.
    """
    open_pat = r'\\begin{tabular}'
    close_pat = r'\\end{tabular}'

    codes = []
    spans = []
    cursor = 0
    open_stack = []

    while cursor < len(text):
        nxt_open = re.search(open_pat, text[cursor:])
        nxt_close = re.search(close_pat, text[cursor:])

        if nxt_open is None and nxt_close is None:
            break

        if nxt_open is not None and (nxt_close is None or nxt_open.start() < nxt_close.start()):
            open_stack.append(cursor + nxt_open.start())
            cursor += nxt_open.start() + len(close_pat)
        elif nxt_close is not None:
            if open_stack:
                begin_at = open_stack.pop()
                if not open_stack:
                    # Outermost close: emit the whole balanced span.
                    finish_at = cursor + nxt_close.start() + len(close_pat)
                    codes.append(text[begin_at:finish_at])
                    spans.append((begin_at, finish_at))
                cursor += nxt_close.start() + len(close_pat)
            else:
                # Stray close tag with no open: skip one character.
                cursor += 1

    if open_stack:
        # Dangling open tag: retry just past it and shift the offsets back.
        retry_from = open_stack[0] + len(open_pat)
        more_codes, more_spans = extract_tabular(text[retry_from:])
        more_spans = [(s + retry_from, e + retry_from) for s, e in more_spans]
        codes.extend(more_codes)
        spans.extend(more_spans)

    return codes, spans
|
| 459 |
+
|
| 460 |
+
# --- Module-level patterns used by the extraction passes below. ---

# Display ("interline") math. NOTE: this deliberately also matches single
# $...$ and \(...\) spans; md_tex_filter re-classifies those as inline.
display_reg = re.compile(
    r'\$\$(.*?)\$\$|'
    r'\\\[(.*?)\\\]|'
    r'\$(.*?)\$|'
    r'\\\((.*?)\\\)',
    re.DOTALL
)

# Inline math: group 1 captures $...$, group 2 captures \(...\).
inline_reg = re.compile(
    r'\$(.*?)\$|'
    r'\\\((.*?)\\\)',
)

# LaTeX table/tabular environments (starred variants included).
table_reg = re.compile(
    r'\\begin{table\*?}(.*?)\\end{table\*?}|'
    r'\\begin{tabular\*?}(.*?)\\end{tabular\*?}',
    re.DOTALL
)
# One Markdown table row: a |-delimited line ending in newline.
md_table_reg = re.compile(
    r'\|\s*.*?\s*\|\n',
    re.DOTALL)
# A full HTML table element (non-greedy, spans newlines).
html_table_reg = re.compile(
    r'(<table.*?</table>)',
    re.DOTALL
)

# Markdown title: a line starting with '#' (any heading level).
title_reg = re.compile(
    r'^\s*#.*$',
    re.MULTILINE)

# Markdown image link: ![alt](url).
img_pattern = r'!\[.*?\]\(.*?\)'

# Fenced code block: ```lang\n...``` (group 1 = language, group 2 = body).
code_block_reg = re.compile(
    r'```(\w+)\n(.*?)```',
    re.DOTALL
)
|
| 508 |
+
|
| 509 |
+
|
| 510 |
+
def md_tex_filter(content):
    '''
    Split one page of Markdown/TeX model output into typed components.

    Input: 1 page md or tex content - String
    Output: defaultdict(list) keyed by category_type ('latex_table',
    'html_table', 'equation_isolated', 'text_all'), each item a dict with
    'position', 'content' and optionally 'fine_category_type'.

    Extraction order matters: LaTeX tables, HTML tables, display math,
    Markdown tables, code blocks, then plain text. Each extracted span is
    blanked with spaces of equal length so offsets stay stable and later
    passes cannot re-match it.
    '''
    content = re.sub(img_pattern, '', content)  # remove image links
    content = remove_markdown_fences(content)  # remove markdown fences
    content = replace_repeated_chars(content)  # collapse repeated characters

    pred_all = []

    # LaTeX tables (table environments + bare tabulars).
    latex_table_array, table_positions = extract_tex_table(content)
    for latex_table, position in zip(latex_table_array, table_positions):
        # Recompute the end from the extracted text; extract_tabular's end
        # offsets can overshoot by one character.
        position = [position[0], position[0]+len(latex_table)]
        pred_all.append({
            'category_type': 'latex_table',
            'position': position,
            'content': latex_table
        })
        content = content[:position[0]] + ' '*(position[1]-position[0]) + content[position[1]:]

    # extract html table
    html_table_array, table_positions = extract_html_table(content)
    for html_table, position in zip(html_table_array, table_positions):
        position = [position[0], position[0]+len(html_table)]
        pred_all.append({
            'category_type': 'html_table',
            'position': position,
            'content': html_table
        })
        content = content[:position[0]] + ' '*(position[1]-position[0]) + content[position[1]:]

    # extract interline formula
    display_matches = display_reg.finditer(content)
    for match in display_matches:
        matched = match.group(0)
        if matched:
            # Collapse all whitespace inside the formula.
            single_line = ''.join(matched.split())
            position = [match.start(), match.end()]
            # Re-match to decide how to rewrite the delimiters to \[...\].
            dollar_pattern = re.compile(r'\$\$(.*?)\$\$|\$(.*?)\$|\\\((.*?)\\\)', re.DOTALL)
            sub_match = dollar_pattern.search(single_line)
            if sub_match is None:
                # Already \[...\]-delimited: keep as-is, blank the span.
                content = content[:position[0]] + ' '*(position[1]-position[0]) + content[position[1]:]
                pred_all.append({
                    'category_type': 'equation_isolated',
                    'position': position,
                    'content': single_line
                })
            elif sub_match.group(1):
                # $$...$$: rewrite to \[...\] and blank the span.
                single_line = re.sub(dollar_pattern, r'\\[\1\\]', single_line)
                content = content[:position[0]] + ' '*(position[1]-position[0]) + content[position[1]:]
                pred_all.append({
                    'category_type': 'equation_isolated',
                    'position': position,
                    'content': single_line
                })
            else:
                # Single $...$ or \(...\): record as inline-style isolated
                # equation but leave the original text untouched.
                single_line = re.sub(dollar_pattern, r'\\[\2\3\\]', single_line)
                pred_all.append({
                    'category_type': 'equation_isolated',
                    'position': position,
                    'content': single_line,
                    'fine_category_type': 'equation_inline'
                })

    # extract md table with || (need at least two |-rows to count as one)
    md_table_mathces = md_table_reg.findall(content+'\n')
    if len(md_table_mathces) >= 2:
        content = convert_markdown_to_html(content)
        html_table_matches = html_table_reg.finditer(content)
        if html_table_matches:  # NOTE: finditer is always truthy; harmless
            for match in html_table_matches:
                matched = match.group(0)
                position = [match.start(), match.end()]
                content = content[:position[0]] + ' '*(position[1]-position[0]) + content[position[1]:]
                pred_all.append({
                    'category_type': 'html_table',
                    'position': position,
                    'content': matched.strip(),
                    'fine_category_type': 'md2html_table'
                })

    # extract code blocks
    code_matches = code_block_reg.finditer(content)
    if code_matches:  # NOTE: finditer is always truthy; harmless
        for match in code_matches:
            position = [match.start(), match.end()]
            language = match.group(1)
            code = match.group(2).strip()
            content = content[:position[0]] + ' '*(position[1]-position[0]) + content[position[1]:]
            pred_all.append({
                'category_type': 'text_all',
                'position': position,
                'content': code,
                'language': language,
                'fine_category_type': 'code'
            })

    # Remove latex styling wrappers, keeping their inner text.
    content = re.sub(r'\\title\{(.*?)\}', r'\1', content)
    content = re.sub(r'\\title\s*\{\s*(.*?)\s*\}', r'\1', content, flags=re.DOTALL)
    content = re.sub(r'\\text\s*\{\s*(.*?)\s*\}', r'\1', content, flags=re.DOTALL)
    content = re.sub(r'\\section\*?\{(.*?)\}', r'\1', content)
    content = re.sub(r'\\section\*?\{\s*(.*?)\s*\}', r'\1', content, flags=re.DOTALL)

    # extract texts: split into paragraphs on blank lines, falling back to
    # single newlines for models that never emit double newlines.
    res = content.split('\n\n')
    if len(res) == 1:
        res = content.split('\n')

    # NOTE(review): content_position is advanced by len(text) only, without
    # the removed separators, so these positions drift for later paragraphs
    # — confirm whether downstream only uses them for relative ordering.
    content_position = 0
    for text in res:
        position = [content_position, content_position+len(text)]
        content_position += len(text)
        text = text.strip()
        text = text.strip('\n')
        # Avoid single-newline content carrying long runs of spaces.
        text = '\n'.join([_.strip() for _ in text.split('\n') if _.strip()])

        if text:  # Check if the stripped text is not empty
            if text.startswith('<table') and text.endswith('</table>'):
                pred_all.append({
                    'category_type': 'html_table',
                    'position': position,
                    'content': text,
                })

            elif text.startswith('$') and text.endswith('$'):
                # Dollar-wrapped leftover: treat as an isolated equation,
                # unless it is only dollar signs.
                if text.replace('$', '').strip():
                    pred_all.append({
                        'category_type': 'equation_isolated',
                        'position': position,
                        'content': text.strip(),
                    })
            else:
                text = text.strip()
                if text:
                    pred_all.append({
                        'category_type': 'text_all',
                        'position': position,
                        'content': text,
                        'fine_category_type': 'text_block'
                    })

    # Group items by category, ordered by start offset.
    pred_dataset = defaultdict(list)
    pred_all = sorted(pred_all, key=lambda x: x['position'][0])
    for item in pred_all:
        pred_dataset[item['category_type']].append(item)
    return pred_dataset
|
| 674 |
+
|
| 675 |
+
|
| 676 |
+
def extract_tex_table(content):
    """Collect LaTeX tables: full ``table`` environments plus bare tabulars.

    ``table`` environments are matched first and blanked out (preserving
    offsets) so their inner tabulars are not extracted twice; leftover
    tabulars are then collected with ``extract_tabular``.

    Returns:
        (tables, positions) sorted by start offset, where positions are
        (start, end) pairs into the original *content*.
    """
    env_pattern = r'\\begin{table}(.*?)\\end{table}'
    env_tables = []
    env_spans = []
    for hit in re.finditer(env_pattern, content, re.DOTALL):
        env_tables.append(hit.group(0))
        env_spans.append((hit.start(), hit.end()))
        # Blank the span so extract_tabular skips the nested tabular.
        content = content[:hit.start()] + ' ' * (hit.end() - hit.start()) + content[hit.end():]

    bare_tabulars, bare_spans = extract_tabular(content)

    combined = sorted(
        zip(env_spans + bare_spans, env_tables + bare_tabulars),
        key=lambda pair: pair[0][0],
    )
    all_tables = [pair[1] for pair in combined]
    all_positions = [pair[0] for pair in combined]

    return all_tables, all_positions
|
| 698 |
+
|
| 699 |
+
|
| 700 |
+
def extract_html_table(text):
    """Extract balanced ``<table ...>...</table>`` spans from *text*.

    Mirrors ``extract_tabular``: open tags are tracked on a stack so only
    outermost tables are emitted; a dangling open tag triggers a recursive
    retry just past it.

    Returns:
        (codes, positions) — table source strings and (start, end) offsets.
    """
    open_pat = r'<table(?:[^>]*)>'
    close_pat = r'</table>'

    codes = []
    spans = []
    cursor = 0
    open_stack = []

    while cursor < len(text):
        nxt_open = re.search(open_pat, text[cursor:])
        nxt_close = re.search(close_pat, text[cursor:])

        if nxt_open is None and nxt_close is None:
            break

        if nxt_open is not None and (nxt_close is None or nxt_open.start() < nxt_close.start()):
            open_stack.append(cursor + nxt_open.start())
            cursor += nxt_open.start() + len(close_pat)
        elif nxt_close is not None:
            if open_stack:
                begin_at = open_stack.pop()
                if not open_stack:
                    # Outermost close: emit the whole balanced span.
                    finish_at = cursor + nxt_close.start() + len(close_pat)
                    codes.append(text[begin_at:finish_at])
                    spans.append((begin_at, finish_at))
                cursor += nxt_close.start() + len(close_pat)
            else:
                # Stray close tag with no open: skip one character.
                cursor += 1

    if open_stack:
        # Dangling open tag: retry just past it and shift offsets back.
        retry_from = open_stack[0] + len(open_pat)
        more_codes, more_spans = extract_html_table(text[retry_from:])
        more_spans = [(s + retry_from, e + retry_from) for s, e in more_spans]
        codes.extend(more_codes)
        spans.extend(more_spans)

    return codes, spans
|
| 739 |
+
|
| 740 |
+
|
| 741 |
+
def extract_node_content(node):
    """Recursively extract content from a pylatexenc node tree and rebuild
    its LaTeX source representation.

    Handles chars, groups, macros, environments and specials; any other
    node kind contributes the empty string.
    """
    if isinstance(node, LatexCharsNode):
        return node.chars  # plain text: use chars attribute directly
    elif isinstance(node, LatexGroupNode):
        # Re-wrap the group's children in braces.
        return "{" + "".join(extract_node_content(n) for n in node.nodelist) + "}"
    elif isinstance(node, LatexMacroNode):
        # Extract macro command and its arguments.
        macro_content = "\\" + node.macroname
        if node.nodeargs:
            macro_content += "".join([extract_node_content(arg) for arg in node.nodeargs])
        return macro_content
    elif isinstance(node, LatexEnvironmentNode):
        # Rebuild the environment, preserving its name and first argument
        # (e.g. the tabular column spec).
        content = "\\begin{" + node.environmentname + "}"
        if node.nodeargd and node.nodeargd.argnlist:
            # Only the first parsed argument is re-emitted here.
            content += "{" + extract_node_content(node.nodeargd.argnlist[0]) + "}"
        if node.nodelist:
            content += "".join(extract_node_content(n) for n in node.nodelist)
        content += "\\end{" + node.environmentname + "}"
        return content
    elif isinstance(node, LatexSpecialsNode):
        return node.specials_chars  # e.g. '&', '~' and other specials
    else:
        return ""
|
| 768 |
+
|
| 769 |
+
def get_node_end_pos(node):
    """Return the end offset of *node*, descending into its last child.

    Falls back to ``pos_end`` when there are no children, and finally to
    ``pos + len(str(node))`` as a rough estimate.
    """
    children = getattr(node, 'nodelist', None)
    if children:
        # Recurse into the last child: it ends where the node ends.
        return get_node_end_pos(children[-1])
    if hasattr(node, 'pos_end'):
        return node.pos_end
    # Leaf without pos_end: estimate from its string length.
    return node.pos + len(str(node))
|
| 780 |
+
|
| 781 |
+
def remove_tex_table(content):
    """Return *content* with every LaTeX table span deleted."""
    _, spans = extract_tex_table(content)

    # Delete right-to-left so earlier offsets stay valid.
    for begin, finish in sorted(spans, reverse=True):
        content = content[:begin] + content[finish:]

    return content
|
| 789 |
+
|
| 790 |
+
|
| 791 |
+
|
| 792 |
+
def get_pred_category_type(pred_idx, pred_items):
    """Return the finest-grained category label of prediction *pred_idx*.

    Prefers a truthy 'fine_category_type', otherwise 'category_type'.
    """
    item = pred_items[pred_idx]
    return item.get('fine_category_type') or item['category_type']
|
| 801 |
+
|
| 802 |
+
|
| 803 |
+
def compute_edit_distance_matrix_new(gt_lines, matched_lines):
    """Pairwise normalized Levenshtein distances (rows: GT, cols: matched).

    Each cell is distance / max(len) in [0, 1]; two empty strings score 0.
    """
    try:
        dist = np.zeros((len(gt_lines), len(matched_lines)))
        for row, gt in enumerate(gt_lines):
            for col, cand in enumerate(matched_lines):
                if gt or cand:
                    dist[row][col] = Levenshtein.distance(gt, cand) / max(len(cand), len(gt))
                else:
                    # Both empty: identical.
                    dist[row][col] = 0
        return dist
    except ZeroDivisionError:
        # The denominator can only vanish when both strings are empty,
        # which is handled above; re-raise defensively.
        raise
|
| 818 |
+
|
| 819 |
+
def get_gt_pred_lines(gt_items, pred_items, line_type):
    """Build parallel raw/normalized line lists for GT and prediction items.

    *line_type* selects which GT field carries the content ('text',
    'html_table', 'formula', 'latex_table') when no explicit 'content' is
    present, and which normalization to apply. Entries whose raw or
    normalized form is empty are filtered out on both sides.

    Returns:
        (gt_lines, norm_gt_lines, gt_cat_list, pred_lines, norm_pred_lines)
    """
    norm_html_lines = []
    gt_lines = []
    gt_cat_list = []
    for item in gt_items:
        # Category: prefer the fine-grained label when present.
        if item.get('fine_category_type'):
            gt_cat_list.append(item['fine_category_type'])
        else:
            gt_cat_list.append(item['category_type'])
        # Content: explicit 'content' wins, else the line_type-specific field.
        if item.get('content'):
            gt_lines.append(str(item['content']))
            norm_html_lines.append(str(item['content']))
        elif line_type == 'text':
            gt_lines.append(str(item['text']))
        elif line_type == 'html_table':
            gt_lines.append(str(item['html']))
        elif line_type == 'formula':
            gt_lines.append(str(item['latex']))
        elif line_type == 'latex_table':
            # Keep the LaTeX for normalization, the HTML for reporting.
            gt_lines.append(str(item['latex']))
            norm_html_lines.append(str(item['html']))

    pred_lines = [str(item['content']) for item in pred_items]

    # Normalize both sides with the line_type-appropriate transform.
    if line_type == 'formula':
        norm_gt_lines = [normalized_formula(_) for _ in gt_lines]
        norm_pred_lines = [normalized_formula(_) for _ in pred_lines]
    elif line_type == 'text':
        norm_gt_lines = [clean_string(textblock2unicode(_)) for _ in gt_lines]
        norm_pred_lines = [clean_string(textblock2unicode(_)) for _ in pred_lines]
    else:
        norm_gt_lines = gt_lines
        norm_pred_lines = pred_lines

    # For LaTeX tables, report the HTML form as the raw GT line.
    if line_type == 'latex_table':
        gt_lines = norm_html_lines

    # Drop GT entries whose raw or normalized form is empty.
    filtered_lists = [(a, b, c) for a, b, c in zip(gt_lines, norm_gt_lines, gt_cat_list) if a and b]

    # decompress to three lists
    if filtered_lists:
        gt_lines_c, norm_gt_lines_c, gt_cat_list_c = zip(*filtered_lists)

        # convert to lists
        gt_lines_c = list(gt_lines_c)
        norm_gt_lines_c = list(norm_gt_lines_c)
        gt_cat_list_c = list(gt_cat_list_c)
    else:
        gt_lines_c = []
        norm_gt_lines_c = []
        gt_cat_list_c = []

    # Same filtering for the prediction side.
    filtered_lists = [(a, b) for a, b in zip(pred_lines, norm_pred_lines) if a and b]

    # decompress to two lists
    if filtered_lists:
        pred_lines_c, norm_pred_lines_c = zip(*filtered_lists)

        # convert to lists
        pred_lines_c = list(pred_lines_c)
        norm_pred_lines_c = list(norm_pred_lines_c)
    else:
        pred_lines_c = []
        norm_pred_lines_c = []

    return gt_lines_c, norm_gt_lines_c, gt_cat_list_c, pred_lines_c, norm_pred_lines_c
|
| 893 |
+
|
| 894 |
+
|
| 895 |
+
def match_gt2pred_simple(gt_items, pred_items, line_type, img_name):
    """Match GT lines to predicted lines with Hungarian assignment.

    The cost matrix is the normalized edit distance between normalized GT
    and prediction lines. Output is one record per GT line (matched or
    not), plus a single merged record for any predictions left unmatched;
    unmatched sides are represented with empty-string fields and edit 1.

    Args:
        gt_items: ground-truth item dicts.
        pred_items: prediction item dicts (with 'content' and 'position').
        line_type: content type, forwarded to get_gt_pred_lines.
        img_name: image id copied into every record.

    Returns:
        list of match-record dicts.
    """
    gt_lines, norm_gt_lines, gt_cat_list, pred_lines, norm_pred_lines = get_gt_pred_lines(gt_items, pred_items, line_type)

    match_list = []
    if not norm_gt_lines and not norm_pred_lines:
        # Nothing on either side: nothing to match. (Previously this fell
        # into the no-GT branch and crashed indexing an empty pred list;
        # returning [] matches match_gt2pred_quick's behavior.)
        return match_list
    if not norm_gt_lines:
        # No GT at all: merge every prediction into one unmatched record.
        pred_idx_list = range(len(norm_pred_lines))
        match_list.append({
            'gt_idx': [""],
            'gt': "",
            'pred_idx': pred_idx_list,
            'pred': ''.join(pred_lines[_] for _ in pred_idx_list),
            'gt_position': [""],
            'pred_position': pred_items[pred_idx_list[0]]['position'][0],  # first pred's position
            'norm_gt': "",
            'norm_pred': ''.join(norm_pred_lines[_] for _ in pred_idx_list),
            'gt_category_type': "",
            'pred_category_type': get_pred_category_type(pred_idx_list[0], pred_items),  # first pred's category
            'gt_attribute': [{}],
            'edit': 1,
            'img_id': img_name
        })
        return match_list
    elif not norm_pred_lines:
        # No predictions: emit one unmatched record per GT line.
        for gt_idx in range(len(norm_gt_lines)):
            match_list.append({
                'gt_idx': [gt_idx],
                'gt': gt_lines[gt_idx],
                'pred_idx': [""],
                'pred': "",
                'gt_position': [gt_items[gt_idx].get('order') if gt_items[gt_idx].get('order') else gt_items[gt_idx].get('position', [""])[0]],
                'pred_position': "",
                'norm_gt': norm_gt_lines[gt_idx],
                'norm_pred': "",
                'gt_category_type': gt_cat_list[gt_idx],
                'pred_category_type': "",
                'gt_attribute': [gt_items[gt_idx].get("attribute", {})],
                'edit': 1,
                'img_id': img_name
            })
        return match_list

    # Optimal one-to-one assignment on the normalized edit-distance matrix.
    cost_matrix = compute_edit_distance_matrix_new(norm_gt_lines, norm_pred_lines)
    row_ind, col_ind = linear_sum_assignment(cost_matrix)

    for gt_idx in range(len(norm_gt_lines)):
        if gt_idx in row_ind:
            row_i = list(row_ind).index(gt_idx)
            pred_idx = int(col_ind[row_i])
            pred_line = pred_lines[pred_idx]
            norm_pred_line = norm_pred_lines[pred_idx]
            edit = cost_matrix[gt_idx][pred_idx]
        else:
            # This GT line got no assignment (more GT than preds).
            pred_idx = ""
            pred_line = ""
            norm_pred_line = ""
            edit = 1

        match_list.append({
            'gt_idx': [gt_idx],
            'gt': gt_lines[gt_idx],
            'norm_gt': norm_gt_lines[gt_idx],
            'gt_category_type': gt_cat_list[gt_idx],
            'gt_position': [gt_items[gt_idx].get('order') if gt_items[gt_idx].get('order') else gt_items[gt_idx].get('position', [""])[0]],
            'gt_attribute': [gt_items[gt_idx].get("attribute", {})],
            'pred_idx': [pred_idx],
            'pred': pred_line,
            'norm_pred': norm_pred_line,
            # BUGFIX: compare against "" instead of relying on truthiness —
            # a valid match at pred index 0 previously lost its category
            # and position because int 0 is falsy.
            'pred_category_type': get_pred_category_type(pred_idx, pred_items) if pred_idx != "" else "",
            'pred_position': pred_items[pred_idx]['position'][0] if pred_idx != "" else "",
            'edit': edit,
            'img_id': img_name
        })

    # Any predictions the assignment did not use are merged into one record.
    pred_idx_list = [pred_idx for pred_idx in range(len(norm_pred_lines)) if pred_idx not in col_ind]
    if pred_idx_list:
        match_list.append({
            'gt_idx': [""],
            'gt': "",
            'pred_idx': pred_idx_list,
            'pred': ''.join(pred_lines[_] for _ in pred_idx_list),
            'gt_position': [""],
            'pred_position': pred_items[pred_idx_list[0]]['position'][0],  # first pred's position
            'norm_gt': "",
            'norm_pred': ''.join(norm_pred_lines[_] for _ in pred_idx_list),
            'gt_category_type': "",
            'pred_category_type': get_pred_category_type(pred_idx_list[0], pred_items),  # first pred's category
            'gt_attribute': [{}],
            'edit': 1,
            'img_id': img_name
        })
    return match_list
|
| 998 |
+
|
| 999 |
+
|
| 1000 |
+
def match_gt2pred_no_split(gt_items, pred_items, line_type, img_name):
    """Concatenate all GT lines and all pred lines into one merged pair.

    GT lines are ordered by their reading 'order' (falling back to layout
    position) and joined with blank lines; prediction lines are ordered by
    their 'position'. Returns a one-element match list, or [] when both
    merged normalized strings are empty.

    NOTE(review): gt_lines/norm_gt_lines come back from get_gt_pred_lines
    with empty entries filtered out, while gt_items here is unfiltered —
    the zip below can misalign lines with items when anything was dropped;
    confirm against callers.
    """
    # directly concatenate gt and pred by position
    gt_lines, norm_gt_lines, gt_cat_list, pred_lines, norm_pred_lines = get_gt_pred_lines(gt_items, pred_items, line_type)
    gt_line_with_position = []
    for gt_line, norm_gt_line, gt_item in zip(gt_lines, norm_gt_lines, gt_items):
        # Prefer explicit reading order; fall back to the layout position.
        gt_position = gt_item['order'] if gt_item.get('order') else gt_item.get('position', [""])[0]
        if gt_position:
            gt_line_with_position.append((gt_position, gt_line, norm_gt_line))
    sorted_gt_lines = sorted(gt_line_with_position, key=lambda x: x[0])
    gt = '\n\n'.join([_[1] for _ in sorted_gt_lines])
    norm_gt = '\n\n'.join([_[2] for _ in sorted_gt_lines])
    pred_line_with_position = [(pred_item['position'], pred_line, pred_norm_line) for pred_line, pred_norm_line, pred_item in zip(pred_lines, norm_pred_lines, pred_items)]
    sorted_pred_lines = sorted(pred_line_with_position, key=lambda x: x[0])
    pred = '\n\n'.join([_[1] for _ in sorted_pred_lines])
    norm_pred = '\n\n'.join([_[2] for _ in sorted_pred_lines])
    if norm_gt or norm_pred:
        return [{
            'gt_idx': [0],
            'gt': gt,
            'norm_gt': norm_gt,
            'gt_category_type': "text_merge",
            'gt_position': [""],
            'gt_attribute': [{}],
            'pred_idx': [0],
            'pred': pred,
            'norm_pred': norm_pred,
            'pred_category_type': "text_merge",
            'pred_position': "",
            'img_id': img_name
        }]
    else:
        return []
|
| 1034 |
+
|
| 1035 |
+
|
| 1036 |
+
from scipy.optimize import linear_sum_assignment
|
| 1037 |
+
# from rapidfuzz.distance import Levenshtein
|
| 1038 |
+
import Levenshtein
|
| 1039 |
+
from collections import defaultdict
|
| 1040 |
+
import copy
|
| 1041 |
+
import pdb
|
| 1042 |
+
import numpy as np
|
| 1043 |
+
import evaluate
|
| 1044 |
+
from collections import Counter
|
| 1045 |
+
from Levenshtein import distance as Levenshtein_distance
|
| 1046 |
+
|
| 1047 |
+
|
| 1048 |
+
def match_gt2pred_quick(gt_items, pred_items, line_type, img_name):
    """Match ground-truth text lines to predicted lines for one page image.

    Fast matching path: the trivial cases (no GT, no predictions, exactly
    one line on each side) are answered directly; otherwise a Hungarian
    assignment over an edit-distance cost matrix is refined by fuzzy
    substring matching and merge passes.

    Args:
        gt_items: GT annotation dicts (carry 'order'/'position'/'attribute').
        pred_items: prediction dicts (carry 'position').
        line_type: line category selector forwarded to get_gt_pred_lines.
        img_name: image id copied into every result entry.

    Returns:
        List of match dicts with gt/pred text, indices, positions,
        category types, attributes and a normalized edit distance.
    """
    gt_lines, norm_gt_lines, gt_cat_list, pred_lines, norm_pred_lines= get_gt_pred_lines(gt_items, pred_items, line_type)
    all_gt_indices = set(range(len(norm_gt_lines)))
    all_pred_indices = set(range(len(norm_pred_lines)))

    if not norm_gt_lines:
        # No GT lines at all: every prediction is a false positive (edit = 1).
        match_list = []
        for pred_idx in range(len(norm_pred_lines)):
            match_list.append({
                'gt_idx': [""],
                'gt': "",
                'pred_idx': [pred_idx],
                'pred': pred_lines[pred_idx],
                'gt_position': "",
                'pred_position': pred_items[pred_idx]['position'][0],
                'norm_gt': "",
                'norm_pred': norm_pred_lines[pred_idx],
                'gt_category_type': "",
                'pred_category_type': get_pred_category_type(pred_idx, pred_items),
                'gt_attribute': [{}],
                'edit': 1,
                'img_id': img_name
            })
        return match_list
    elif not norm_pred_lines:
        # No predictions: every GT line is a miss (edit = 1).
        match_list = []
        for gt_idx in range(len(norm_gt_lines)):
            match_list.append({
                'gt_idx': [gt_idx],
                'gt': gt_lines[gt_idx],
                'pred_idx': [""],
                'pred': "",
                # Reading order comes from 'order' when present, else the first 'position'.
                'gt_position': [gt_items[gt_idx].get('order') if gt_items[gt_idx].get('order') else gt_items[gt_idx].get('position', [""])[0]],
                'pred_position': "",
                'norm_gt': norm_gt_lines[gt_idx],
                'norm_pred': "",
                'gt_category_type': gt_cat_list[gt_idx],
                'pred_category_type': "",
                'gt_attribute': [gt_items[gt_idx].get("attribute", {})],
                'edit': 1,
                'img_id': img_name
            })
        return match_list
    elif len(norm_gt_lines) == 1 and len(norm_pred_lines) == 1:
        # Exactly one line on each side: match them directly.
        edit_distance = Levenshtein_distance(norm_gt_lines[0], norm_pred_lines[0])
        normalized_edit_distance = edit_distance / max(len(norm_gt_lines[0]), len(norm_pred_lines[0]))
        return [{
            'gt_idx': [0],
            'gt': gt_lines[0],
            'pred_idx': [0],
            'pred': pred_lines[0],
            'gt_position': [gt_items[0].get('order') if gt_items[0].get('order') else gt_items[0].get('position', [""])[0]],
            'pred_position': pred_items[0]['position'][0],
            'norm_gt': norm_gt_lines[0],
            'norm_pred': norm_pred_lines[0],
            'gt_category_type': gt_cat_list[0],
            'pred_category_type': get_pred_category_type(0, pred_items),
            'gt_attribute': [gt_items[0].get("attribute", {})],
            'edit': normalized_edit_distance,
            'img_id': img_name
        }]

    # General case: pairwise edit-distance cost matrix + Hungarian assignment.
    cost_matrix = compute_edit_distance_matrix_new(norm_gt_lines, norm_pred_lines)

    matched_col_idx, row_ind, cost_list = cal_final_match(cost_matrix, norm_gt_lines, norm_pred_lines)

    gt_lens_dict, pred_lens_dict = initialize_indices(norm_gt_lines, norm_pred_lines)

    # Accept good assignments; collect leftovers on both sides.
    matches, unmatched_gt_indices, unmatched_pred_indices = process_matches(matched_col_idx, row_ind, cost_list, norm_gt_lines, norm_pred_lines, pred_lines)

    # Try to place leftover GT lines inside predictions via substring fuzzy match.
    matching_dict = fuzzy_match_unmatched_items(unmatched_gt_indices, norm_gt_lines, norm_pred_lines)

    final_matches = merge_matches(matches, matching_dict)

    recalculate_edit_distances(final_matches, gt_lens_dict, norm_gt_lines, norm_pred_lines)

    converted_results = convert_final_matches(final_matches, norm_gt_lines, norm_pred_lines)

    merged_results = merge_duplicates_add_unmatched(converted_results, norm_gt_lines, norm_pred_lines, gt_lines, pred_lines, all_gt_indices, all_pred_indices)

    # Normalize entries (list-wrap indices) and attach text/position/category metadata.
    for entry in merged_results:
        entry['gt_idx'] = [entry['gt_idx']] if not isinstance(entry['gt_idx'], list) else entry['gt_idx']
        entry['pred_idx'] = [entry['pred_idx']] if not isinstance(entry['pred_idx'], list) else entry['pred_idx']
        entry['gt_position'] = [gt_items[_].get('order') if gt_items[_].get('order') else gt_items[_].get('position', [""])[0] for _ in entry['gt_idx']] if entry['gt_idx'] != [""] else [""]
        entry['pred_position'] = pred_items[entry['pred_idx'][0]]['position'][0] if entry['pred_idx'] != [""] else ""
        entry['gt'] = ''.join([gt_lines[_] for _ in entry['gt_idx']]) if entry['gt_idx'] != [""] else ""
        entry['pred'] = ''.join([pred_lines[_] for _ in entry['pred_idx']]) if entry['pred_idx'] != [""] else ""
        entry['norm_gt'] = ''.join([norm_gt_lines[_] for _ in entry['gt_idx']]) if entry['gt_idx'] != [""] else ""
        entry['norm_pred'] = ''.join([norm_pred_lines[_] for _ in entry['pred_idx']]) if entry['pred_idx'] != [""] else ""

        if entry['gt_idx'] != [""]:
            # Majority-vote the category, preferring non-auxiliary categories.
            ignore_type = ['figure_caption', 'figure_footnote', 'table_caption', 'table_footnote', 'code_algorithm', 'code_algorithm_caption', 'header', 'footer', 'page_footnote', 'page_number', 'equation_caption']
            gt_cagegory_clean = [gt_cat_list[_] for _ in entry['gt_idx'] if gt_cat_list[_] not in ignore_type]
            if gt_cagegory_clean:
                entry['gt_category_type'] = Counter(gt_cagegory_clean).most_common(1)[0][0]
            else:
                entry['gt_category_type'] = Counter([gt_cat_list[_] for _ in entry['gt_idx']]).most_common(1)[0][0]
        else:
            entry['gt_category_type'] = ""
        entry['pred_category_type'] = get_pred_category_type(entry['pred_idx'][0], pred_items) if entry['pred_idx'] != [""] else ""
        entry['gt_attribute'] = [gt_items[_].get("attribute", {}) for _ in entry['gt_idx']] if entry['gt_idx'] != [""] else [{}]
        entry['img_id'] = img_name

    return merged_results
|
| 1153 |
+
|
| 1154 |
+
|
| 1155 |
+
def merge_duplicates_add_unmatched(converted_results, norm_gt_lines, norm_pred_lines, gt_lines, pred_lines, all_gt_indices, all_pred_indices):
    """Collapse entries sharing one prediction span, then append missed GT lines.

    Entries pointing at the same pred_idx are merged into a single entry whose
    gt_idx/gt accumulate in input order; entries never folded in are kept as-is;
    any GT index that appears in no entry becomes a miss with edit = 1.
    """
    def as_key(value):
        # Normalize a pred_idx (scalar or list) into a hashable tuple key.
        return tuple(value) if isinstance(value, list) else (value,)

    merged_results = []
    seen_pred_keys = set()
    consumed_gt = set()

    # Pass 1: group every entry that targets the same prediction span.
    for entry in converted_results:
        key = as_key(entry['pred_idx'])
        if key == ("",) or key in seen_pred_keys:
            continue
        merged = {
            'gt_idx': [entry['gt_idx']],
            'gt': entry['gt'],
            'pred_idx': entry['pred_idx'],
            'pred': entry['pred'],
            'edit': entry['edit'],
        }
        for candidate in converted_results:
            if candidate is entry or as_key(candidate['pred_idx']) != key:
                continue
            merged['gt_idx'].append(candidate['gt_idx'])
            merged['gt'] += candidate['gt']
            consumed_gt.add(candidate['gt_idx'])
        merged_results.append(merged)
        seen_pred_keys.add(key)
        consumed_gt.add(entry['gt_idx'])

    # Pass 2: keep entries whose GT index never got folded in above.
    merged_results.extend(e for e in converted_results if e['gt_idx'] not in consumed_gt)

    # Pass 3: every GT line matched nowhere becomes a miss (edit = 1).
    for gt_idx in range(len(norm_gt_lines)):
        if gt_idx not in consumed_gt:
            merged_results.append({
                'gt_idx': [gt_idx],
                'gt': gt_lines[gt_idx],
                'pred_idx': [""],
                'pred': "",
                'edit': 1,
            })
    return merged_results
|
| 1194 |
+
|
| 1195 |
+
|
| 1196 |
+
|
| 1197 |
+
|
| 1198 |
+
def formula_format(formula_matches, img_name):
    """Attach a positional image id to each (gt, pred) formula match pair."""
    formatted = []
    for i, item in enumerate(formula_matches):
        formatted.append({
            "gt": item["gt"],
            "pred": item["pred"],
            "img_id": f"{img_name}_{i}",
        })
    return formatted
|
| 1207 |
+
|
| 1208 |
+
|
| 1209 |
+
def merge_lists_with_sublists(main_list, sub_lists):
    """Replace each contiguous run named in sub_lists by the sub-list itself.

    E.g. main [0,1,2,3,4] with sub [[1,2]] -> [0, [1,2], 3, 4]. Assumes each
    sub-list's members sit contiguously in main_list starting at its first item.
    """
    result = list(copy.deepcopy(main_list))
    for group in sub_lists:
        start = result.index(group[0])
        # Remove the run's members, then splice the group back as one element.
        del result[start:start + len(group)]
        result.insert(start, group)
    return result
|
| 1217 |
+
|
| 1218 |
+
|
| 1219 |
+
def sub_pred_fuzzy_matching(gt, pred):
    """Best normalized edit distance of `pred` against any equal-length window of `gt`.

    Slides a len(pred)-sized window over `gt` and returns the minimum
    Levenshtein distance divided by len(pred), i.e. a score in [0, 1]
    where 0 is an exact substring match.

    Returns:
        float: the minimum normalized distance, or
        False: when `pred` is empty or longer than `gt` (no window exists).
    """
    gt_len = len(gt)
    pred_len = len(pred)

    if gt_len >= pred_len and pred_len > 0:
        min_d = float('inf')
        # Slide a pred-sized window over gt and keep the best score.
        # (Removed dead `pos` tracking: the position was computed but never used.)
        for i in range(gt_len - pred_len + 1):
            window = gt[i:i + pred_len]
            dist = Levenshtein_distance(window, pred) / pred_len
            if dist < min_d:
                min_d = dist
        return min_d
    else:
        return False
|
| 1238 |
+
|
| 1239 |
+
def sub_gt_fuzzy_matching(pred, gt):
    """Best match of the whole `gt` line against any gt-sized substring of `pred`.

    Slides a len(gt)-sized window over `pred` and keeps the window with the
    smallest normalized Levenshtein distance.

    Returns:
        (min_distance, start_pos, gt_len, matched_substring); when `gt` is
        empty or longer than `pred`, returns the no-match sentinel
        (1, "", gt_len, "").
    """
    min_d = float('inf')
    pos = ""
    matched_sub = ""
    gt_len = len(gt)
    pred_len = len(pred)

    if pred_len >= gt_len and gt_len > 0:
        for i in range(pred_len - gt_len + 1):
            sub = pred[i:i + gt_len]
            # Use the module-wide Levenshtein_distance alias for consistency
            # with the other matching helpers (same function, same result).
            dist = Levenshtein_distance(sub, gt) / gt_len
            if dist < min_d:
                min_d = dist
                pos = i
                matched_sub = sub
        return min_d, pos, gt_len, matched_sub
    else:
        return 1, "", gt_len, ""
|
| 1258 |
+
|
| 1259 |
+
|
| 1260 |
+
def get_final_subset(subset_certain, subset_certain_cost):
    """Pick a non-overlapping set of merge groups with minimal average cost.

    Args:
        subset_certain: list of candidate index groups (lists of pred indices).
        subset_certain_cost: parallel list of each group's match cost.

    Returns:
        List of index groups to actually merge; groups in the result do not
        share indices. Within each cluster of mutually overlapping candidates,
        the "path" (compatible combination) with the lowest average cost wins.
    """
    if not subset_certain or not subset_certain_cost:
        return []

    # Sort candidates by their first index so overlap clustering scans left to right.
    subset_turple = sorted([(a, b) for a, b in zip(subset_certain, subset_certain_cost)], key=lambda x: x[0][0])

    # Cluster candidates: a candidate joins the current group iff it overlaps it.
    # NOTE(review): overlap is only tested against the *current* group, so
    # non-adjacent overlaps start a new group — presumably intended; confirm.
    group_list = defaultdict(list)
    group_idx = 0
    group_list[group_idx].append(subset_turple[0])

    for item in subset_turple[1:]:
        overlap_flag = False
        for subset in group_list[group_idx]:
            for idx in item[0]:
                if idx in subset[0]:
                    overlap_flag = True
                    break
            if overlap_flag:
                break
        if overlap_flag:
            group_list[group_idx].append(item)
        else:
            group_idx += 1
            group_list[group_idx].append(item)

    final_subset = []
    for _, group in group_list.items():
        if len(group) == 1:
            # Single candidate: take it as-is.
            final_subset.append(group[0][0])
        else:
            # Enumerate "paths": sets of mutually non-overlapping candidates.
            path_dict = defaultdict(list)
            path_idx = 0
            path_dict[path_idx].append(group[0])

            for subset in group[1:]:
                new_path = True
                for path_idx_s, path_items in path_dict.items():
                    is_dup = False
                    is_same = False
                    for path_item in path_items:
                        if path_item[0] == subset[0]:
                            # Same index group seen before: keep the cheaper cost.
                            is_dup = True
                            is_same = True
                            if path_item[1] > subset[1]:
                                # NOTE(review): mutates the list being iterated
                                # (pop+append while looping path_items) — behavior
                                # depends on this exact order; do not reorder.
                                path_dict[path_idx_s].pop(path_dict[path_idx_s].index(path_item))
                                path_dict[path_idx_s].append(subset)
                        else:
                            # Any shared index makes the candidate incompatible
                            # with this path.
                            for num_1 in path_item[0]:
                                for num_2 in subset[0]:
                                    if num_1 == num_2:
                                        is_dup = True
                    if not is_dup:
                        path_dict[path_idx_s].append(subset)
                        new_path = False
                    if is_same:
                        new_path = False
                if new_path:
                    # Candidate fit no existing path: open a fresh one.
                    path_idx = len(path_dict.keys())
                    path_dict[path_idx].append(subset)

            # Keep the path whose candidates have the lowest average cost.
            saved_cost = float('inf')
            saved_subset = []
            for path_idx, path in path_dict.items():
                avg_cost = sum([i[1] for i in path]) / len(path)
                if avg_cost < saved_cost:
                    saved_subset = [i[0] for i in path]
                    saved_cost = avg_cost

            final_subset.extend(saved_subset)

    return final_subset
|
| 1331 |
+
|
| 1332 |
+
def judge_pred_merge(gt_list, pred_list, threshold=0.6):
    """Decide whether appending pred_list[-1] to the running merge is justified.

    Compares the GT line gt_list[0] against the merged prediction with and
    without the newest fragment, and fuzzy-checks that each fragment occurs
    inside the GT line.

    Returns:
        (should_merge, keep_extending): should_merge is True when the newest
        fragment fuzzily matches within `threshold`; keep_extending is True
        while the merged text is still no longer than the GT line.
    """
    if len(pred_list) == 1:
        return False, False

    gt_text = gt_list[0]
    without_last = ' '.join(pred_list[:-1])
    with_last = ' '.join(pred_list)

    dist_before = Levenshtein.distance(gt_text, without_last) / max(len(gt_text), len(without_last))
    dist_after = Levenshtein.distance(gt_text, with_last) / max(len(gt_text), len(with_last))

    # Adding the fragment must not make the overall match worse.
    if dist_after > dist_before:
        return False, False

    # Every already-merged fragment must itself fuzzily occur in the GT line.
    for fragment in pred_list[:-1]:
        score = sub_pred_fuzzy_matching(gt_text, fragment)
        if score is False or score > threshold:
            return False, False

    newest_score = sub_pred_fuzzy_matching(gt_text, pred_list[-1])
    if newest_score is False:
        return False, False

    should_merge = newest_score < threshold
    keep_extending = len(with_last) <= len(gt_text)
    return should_merge, keep_extending
|
| 1357 |
+
|
| 1358 |
+
def deal_with_truncated(cost_matrix, norm_gt_lines, norm_pred_lines):
    """Detect predictions split across several lines and merge them.

    GT/pred pairs with cost < 0.25 are treated as safely matched and frozen.
    For each remaining GT line, consecutive unmatched predictions are
    tentatively concatenated while judge_pred_merge approves; the cheapest
    non-overlapping merge groups (via get_final_subset) are then applied.

    Returns:
        (cost_matrix, pred_lines, pred_idx_layout): a possibly recomputed cost
        matrix, the (possibly merged) normalized pred lines, and the pred index
        layout aligned with them — a mix of ints and int-lists (merged runs).
    """
    matched_first = np.argwhere(cost_matrix < 0.25)
    masked_gt_idx = [i[0] for i in matched_first]
    unmasked_gt_idx = [i for i in range(cost_matrix.shape[0]) if i not in masked_gt_idx]
    masked_pred_idx = [i[1] for i in matched_first]
    unmasked_pred_idx = [i for i in range(cost_matrix.shape[1]) if i not in masked_pred_idx]

    merges_gt_dict = {}
    merges_pred_dict = {}   # NOTE(review): never populated — appears vestigial
    merged_gt_subsets = []  # NOTE(review): never populated — appears vestigial

    for gt_idx in unmasked_gt_idx:
        check_merge_subset = []
        merged_dist = []

        for pred_idx in unmasked_pred_idx:
            step = 1
            merged_pred = [norm_pred_lines[pred_idx]]

            # Greedily extend the merge window over following predictions while
            # judge_pred_merge approves; stop at frozen (masked) preds or EOF.
            while True:
                if pred_idx + step in masked_pred_idx or pred_idx + step >= len(norm_pred_lines):
                    break
                else:
                    merged_pred.append(norm_pred_lines[pred_idx + step])
                    merged_pred_flag, continue_flag = judge_pred_merge([norm_gt_lines[gt_idx]], merged_pred)
                    if not merged_pred_flag:
                        break
                    else:
                        step += 1
                        if not continue_flag:
                            break

            check_merge_subset.append(list(range(pred_idx, pred_idx + step)))
            matched_line = ' '.join([norm_pred_lines[i] for i in range(pred_idx, pred_idx + step)])
            dist = Levenshtein_distance(norm_gt_lines[gt_idx], matched_line) / max(len(matched_line), len(norm_gt_lines[gt_idx]))
            merged_dist.append(dist)

        # Remember the cheapest candidate window for this GT line.
        if not merged_dist:
            subset_certain = []
            min_cost_idx = ""
            min_cost = float('inf')
        else:
            min_cost = min(merged_dist)
            min_cost_idx = merged_dist.index(min_cost)
            subset_certain = check_merge_subset[min_cost_idx]

        merges_gt_dict[gt_idx] = {
            'merge_subset': check_merge_subset,
            'merged_cost': merged_dist,
            'min_cost_idx': min_cost_idx,
            'subset_certain': subset_certain,
            'min_cost': min_cost
        }

    # Resolve overlaps between per-GT best windows globally.
    subset_certain = [merges_gt_dict[gt_idx]['subset_certain'] for gt_idx in unmasked_gt_idx if merges_gt_dict[gt_idx]['subset_certain']]
    subset_certain_cost = [merges_gt_dict[gt_idx]['min_cost'] for gt_idx in unmasked_gt_idx if merges_gt_dict[gt_idx]['subset_certain']]

    subset_certain_final = get_final_subset(subset_certain, subset_certain_cost)

    if not subset_certain_final:
        # Nothing to merge: keep the original matrix and the identity layout.
        return cost_matrix, norm_pred_lines, range(len(norm_pred_lines))

    final_pred_idx_list = merge_lists_with_sublists(range(len(norm_pred_lines)), subset_certain_final)
    final_norm_pred_lines = [' '.join(norm_pred_lines[idx_list[0]:idx_list[-1]+1]) if isinstance(idx_list, list) else norm_pred_lines[idx_list] for idx_list in final_pred_idx_list]

    new_cost_matrix = compute_edit_distance_matrix_new(norm_gt_lines, final_norm_pred_lines)

    return new_cost_matrix, final_norm_pred_lines, final_pred_idx_list
|
| 1426 |
+
|
| 1427 |
+
def cal_move_dist(gt, pred):
|
| 1428 |
+
assert len(gt) == len(pred), 'Not right length'
|
| 1429 |
+
step = 0
|
| 1430 |
+
for i, gt_c in enumerate(gt):
|
| 1431 |
+
if gt_c != pred[i]:
|
| 1432 |
+
step += abs(i - pred.index(gt_c))
|
| 1433 |
+
pred[i], pred[pred.index(gt_c)] = pred[pred.index(gt_c)], pred[i]
|
| 1434 |
+
return step / len(gt)
|
| 1435 |
+
|
| 1436 |
+
def cal_final_match(cost_matrix, norm_gt_lines, norm_pred_lines):
    """Hungarian-match GT lines against (possibly merged) prediction lines.

    First lets deal_with_truncated merge split predictions and rebuild the
    cost matrix, then solves the assignment problem on the result.

    Returns:
        (matched_col_idx, row_ind, cost_list): for each assigned GT row in
        row_ind, the pred index (int or int-list for merged runs) and the
        assignment cost.
    """
    # (Removed dead `min_indice = cost_matrix.argmax(axis=1)` — the value was
    # never read, and argmax of a *cost* matrix was misleading anyway.)
    new_cost_matrix, final_norm_pred_lines, final_pred_idx_list = deal_with_truncated(cost_matrix, norm_gt_lines, norm_pred_lines)

    row_ind, col_ind = linear_sum_assignment(new_cost_matrix)

    cost_list = [new_cost_matrix[r][c] for r, c in zip(row_ind, col_ind)]
    # Map assignment columns back to original (possibly merged) pred indices.
    matched_col_idx = [final_pred_idx_list[i] for i in col_ind]

    return matched_col_idx, row_ind, cost_list
|
| 1447 |
+
|
| 1448 |
+
def initialize_indices(norm_gt_lines, norm_pred_lines):
    """Return ({gt_idx: line_length}, {pred_idx: line_length}) lookup tables."""
    gt_lens_dict = dict(enumerate(map(len, norm_gt_lines)))
    pred_lens_dict = dict(enumerate(map(len, norm_pred_lines)))
    return gt_lens_dict, pred_lens_dict
|
| 1452 |
+
|
| 1453 |
+
def process_matches(matched_col_idx, row_ind, cost_list, norm_gt_lines, norm_pred_lines, pred_lines):
    """Split Hungarian assignment results into accepted matches and leftovers.

    Args:
        matched_col_idx: per-assignment pred index (int, or int-list for a
            merged run of predictions).
        row_ind: assigned GT row indices (list or numpy array).
        cost_list: per-assignment normalized edit distances.
        norm_gt_lines / norm_pred_lines / pred_lines: line texts (lengths only
            drive the iteration here).

    Returns:
        (matches, unmatched_gt_indices, unmatched_pred_indices) where matches
        maps gt_idx -> {'pred_indices': [...], 'edit_distance': float};
        assignments costing more than 0.7 are rejected to the unmatched lists.
    """
    matches = {}
    unmatched_gt_indices = []
    unmatched_pred_indices = []
    # Hoisted: row_ind may be a numpy array; convert once instead of per GT line.
    row_list = list(row_ind)

    for i in range(len(norm_gt_lines)):
        if i not in row_list:
            unmatched_gt_indices.append(i)
            continue

        idx = row_list.index(i)
        pred_idx = matched_col_idx[idx]

        if pred_idx is None or (isinstance(pred_idx, list) and None in pred_idx):
            unmatched_pred_indices.append(pred_idx)
            continue

        # (Removed dead locals pred_line / ori_pred_line: the joined texts were
        # built but never used.)
        if isinstance(pred_idx, list):
            matched_pred_indices_range = list(range(pred_idx[0], pred_idx[-1] + 1))
        else:
            matched_pred_indices_range = [pred_idx]

        edit = cost_list[idx]

        if edit > 0.7:
            # Too dissimilar: treat both sides of the assignment as unmatched.
            unmatched_pred_indices.extend(matched_pred_indices_range)
            unmatched_gt_indices.append(i)
        else:
            matches[i] = {
                'pred_indices': matched_pred_indices_range,
                'edit_distance': edit,
            }
            # A pred index previously rejected may be reclaimed by this match.
            for matched_pred_idx in matched_pred_indices_range:
                if matched_pred_idx in unmatched_pred_indices:
                    unmatched_pred_indices.remove(matched_pred_idx)

    return matches, unmatched_gt_indices, unmatched_pred_indices
|
| 1493 |
+
|
| 1494 |
+
def fuzzy_match_unmatched_items(unmatched_gt_indices, norm_gt_lines, norm_pred_lines):
    """For each prediction, collect unmatched GT lines that fuzzily occur inside it.

    A GT line is attributed to a prediction when sub_gt_fuzzy_matching finds
    a window of the prediction within normalized distance 0.4 of the GT line.

    Returns:
        dict mapping pred_idx -> list of matching unmatched GT indices
        (predictions with no hits are omitted).
    """
    matching_dict = {}

    for pred_idx, pred_content in enumerate(norm_pred_lines):
        # (Removed dead `isinstance(pred_idx, list)` guard: enumerate always
        # yields ints, so the branch could never fire.)
        matching_indices = []

        for unmatched_gt_idx in unmatched_gt_indices:
            gt_content = norm_gt_lines[unmatched_gt_idx]
            cur_fuzzy_dist_unmatch, cur_pos, gt_lens, matched_field = sub_gt_fuzzy_matching(pred_content, gt_content)
            if cur_fuzzy_dist_unmatch < 0.4:
                matching_indices.append(unmatched_gt_idx)

        if matching_indices:
            matching_dict[pred_idx] = matching_indices

    return matching_dict
|
| 1513 |
+
|
| 1514 |
+
def merge_matches(matches, matching_dict):
    """Fold one-to-one and fuzzy matches into {pred_key: info} form.

    Hungarian matches are grouped by their (sorted, tupled) prediction indices
    first; fuzzy matches are folded in afterwards without ever re-claiming a
    GT index. Fuzzy-only entries carry edit_distance=None until recalculated.
    """
    final_matches = {}
    claimed_gt = set()

    # Pass 1: group the Hungarian matches by prediction tuple.
    for gt_idx, info in matches.items():
        key = tuple(sorted(info['pred_indices']))
        if key in final_matches:
            if gt_idx not in claimed_gt:
                final_matches[key]['gt_indices'].append(gt_idx)
                claimed_gt.add(gt_idx)
        else:
            final_matches[key] = {
                'gt_indices': [gt_idx],
                'edit_distance': info['edit_distance'],
            }
            claimed_gt.add(gt_idx)

    # Pass 2: fold in the fuzzy matches; each GT index is claimed at most once.
    for pred_idx, gt_indices in matching_dict.items():
        key = tuple(sorted(pred_idx)) if isinstance(pred_idx, (list, tuple)) else (pred_idx,)
        if key in final_matches:
            for gt_idx in gt_indices:
                if gt_idx not in claimed_gt:
                    final_matches[key]['gt_indices'].append(gt_idx)
                    claimed_gt.add(gt_idx)
        else:
            fresh = [g for g in gt_indices if g not in claimed_gt]
            final_matches[key] = {
                'gt_indices': fresh,
                'edit_distance': None,
            }
            claimed_gt.update(fresh)

    return final_matches
|
| 1551 |
+
|
| 1552 |
+
|
| 1553 |
+
|
| 1554 |
+
def recalculate_edit_distances(final_matches, gt_lens_dict, norm_gt_lines, norm_pred_lines):
    """Recompute each match's normalized edit distance in place.

    Mutates final_matches: sets 'edit_distance' on every entry, and additionally
    'pred_content' on single-GT entries.

    NOTE(review): `gt_lens_dict` is accepted but never used — confirm before
    removing it from the signature (callers pass it positionally).
    """
    for pred_key, info in final_matches.items():
        gt_indices = sorted(set(info['gt_indices']))

        if not gt_indices:
            # Nothing on the GT side: worst-case distance.
            info['edit_distance'] = 1
            continue

        if len(gt_indices) > 1:
            # Several GT lines vs one prediction: concatenate the GT text.
            # NOTE(review): only pred_key[0] is used here, whereas the
            # single-GT branch joins *all* int keys — asymmetry preserved.
            merged_gt_content = ''.join(norm_gt_lines[gt_idx] for gt_idx in gt_indices)
            pred_content = norm_pred_lines[pred_key[0]] if isinstance(pred_key[0], int) else ''

            try:
                edit_distance = Levenshtein_distance(merged_gt_content, pred_content)
                normalized_edit_distance = edit_distance / max(len(merged_gt_content), len(pred_content))
            except ZeroDivisionError:
                # Both sides empty: treat as a full miss.
                normalized_edit_distance = 1

            info['edit_distance'] = normalized_edit_distance
        else:
            # One GT line vs a (possibly merged) prediction span.
            gt_idx = gt_indices[0]
            pred_content = ' '.join(norm_pred_lines[pred_idx] for pred_idx in pred_key if isinstance(pred_idx, int))

            try:
                edit_distance = Levenshtein_distance(norm_gt_lines[gt_idx], pred_content)
                normalized_edit_distance = edit_distance / max(len(norm_gt_lines[gt_idx]), len(pred_content))
            except ZeroDivisionError:
                normalized_edit_distance = 1

            info['edit_distance'] = normalized_edit_distance
            info['pred_content'] = pred_content
|
| 1585 |
+
|
| 1586 |
+
|
| 1587 |
+
def convert_final_matches(final_matches, norm_gt_lines, norm_pred_lines):
    """Flatten {pred_key: info} matches into per-GT result entries.

    Emits one entry per (pred_key, gt_idx) pair, then sweeps up leftovers:
    unmatched preds are greedily paired with unmatched GTs by raw edit
    distance (Hungarian), remaining preds are bundled into one empty-GT
    entry, and remaining GTs become empty-pred entries (edit = 1).

    NOTE(review): leftover pairing iterates Python sets via list(...) — entry
    order depends on set iteration order; preserved as-is.
    """
    converted_results = []

    all_gt_indices = set(range(len(norm_gt_lines)))
    all_pred_indices = set(range(len(norm_pred_lines)))

    # One result entry per matched GT index.
    for pred_key, info in final_matches.items():
        pred_content = ' '.join(norm_pred_lines[pred_idx] for pred_idx in pred_key if isinstance(pred_idx, int))

        for gt_idx in sorted(set(info['gt_indices'])):
            result_entry = {
                'gt_idx': int(gt_idx),
                'gt': norm_gt_lines[gt_idx],
                'pred_idx': list(pred_key),
                'pred': pred_content,
                'edit': info['edit_distance']
            }
            converted_results.append(result_entry)

    matched_gt_indices = set().union(*[set(info['gt_indices']) for info in final_matches.values()])
    unmatched_gt_indices = all_gt_indices - matched_gt_indices
    matched_pred_indices = set(idx for pred_key in final_matches.keys() for idx in pred_key if isinstance(idx, int))
    unmatched_pred_indices = all_pred_indices - matched_pred_indices

    if unmatched_pred_indices:
        if unmatched_gt_indices:
            # Pair leftover GTs and preds by raw (unnormalized) edit distance.
            distance_matrix = [
                [Levenshtein_distance(norm_gt_lines[gt_idx], norm_pred_lines[pred_idx]) for pred_idx in unmatched_pred_indices]
                for gt_idx in unmatched_gt_indices
            ]

            row_ind, col_ind = linear_sum_assignment(distance_matrix)

            for i, j in zip(row_ind, col_ind):
                gt_idx = list(unmatched_gt_indices)[i]
                pred_idx = list(unmatched_pred_indices)[j]
                result_entry = {
                    'gt_idx': int(gt_idx),
                    'gt': norm_gt_lines[gt_idx],
                    'pred_idx': [pred_idx],
                    'pred': norm_pred_lines[pred_idx],
                    # Leftover pairs always count as a full miss.
                    'edit': 1
                }
                converted_results.append(result_entry)

            matched_gt_indices.update(list(unmatched_gt_indices)[i] for i in row_ind)
        else:
            # Only preds remain: bundle them into a single empty-GT entry.
            result_entry = {
                'gt_idx': "",
                'gt': '',
                'pred_idx': list(unmatched_pred_indices),
                'pred': ' '.join(norm_pred_lines[pred_idx] for pred_idx in unmatched_pred_indices),
                'edit': 1
            }
            converted_results.append(result_entry)
    else:
        # Only GTs remain: each becomes an empty-pred miss.
        for gt_idx in unmatched_gt_indices:
            result_entry = {
                'gt_idx': int(gt_idx),
                'gt': norm_gt_lines[gt_idx],
                'pred_idx': "",
                'pred': '',
                'edit': 1
            }
            converted_results.append(result_entry)

    return converted_results
|
| 1654 |
+
|
| 1655 |
+
import json
|
| 1656 |
+
|
| 1657 |
+
def read_md_file(filepath):
    """Return the entire text of a UTF-8 encoded markdown file."""
    with open(filepath, encoding='utf-8') as fh:
        return fh.read()
|
| 1662 |
+
|
| 1663 |
+
def save_paired_result(preds, gts, save_path):
    """Write aligned (gt, pred) pairs to a JSON file, ids assigned by position."""
    paired = [
        {"gt": gt, "pred": pred, "img_id": idx}
        for idx, (gt, pred) in enumerate(zip(gts, preds))
    ]
    with open(save_path, 'w', encoding='utf-8') as f:
        json.dump(paired, f, indent=4, ensure_ascii=False)
|
| 1675 |
+
|
| 1676 |
+
|
| 1677 |
+
import matplotlib.pyplot as plt
|
| 1678 |
+
import numpy as np
|
| 1679 |
+
import os
|
| 1680 |
+
import re
|
| 1681 |
+
import matplotlib.font_manager as fm
|
| 1682 |
+
# CJK-capable font used for chart axis labels. NOTE(review): the path is
# resolved relative to the current working directory — confirm font/SimHei.ttf
# is shipped alongside the script.
font = fm.FontProperties(fname=r'font/SimHei.ttf')
|
| 1683 |
+
|
| 1684 |
+
|
| 1685 |
+
def print_aligned_dict(data):
    """Print `data` as an aligned table.

    Columns are the top-level keys of `data`; rows are the sub-keys of
    data['testcase1'] (all test cases are assumed to share them). Cells are
    right-aligned to the widest sub-key name.
    """
    width = max(len(sub) for sub in data['testcase1'])

    # Header row: a blank corner cell, then one column label per test case.
    header = ' ' * (width + 4) + ''.join(f"{name:>{width}}" for name in data)
    print(header)

    # One row per sub-key, values pulled column by column.
    for sub in data['testcase1']:
        row = f"{sub:<{width + 4}}" + ''.join(f"{data[name][sub]:>{width}}" for name in data)
        print(row)
|
| 1701 |
+
def create_dict_from_folders(directory):
    """Return {subfolder_name: {}} for every immediate subdirectory of `directory`."""
    return {
        name: {}
        for name in os.listdir(directory)
        if os.path.isdir(os.path.join(directory, name))
    }
|
| 1708 |
+
|
| 1709 |
+
|
| 1710 |
+
def create_radar_chart(df, title, filename):
    """Render each row of `df` as one polygon on a polar (radar) chart and save it.

    Args:
        df: pandas-like frame; columns are axis labels, each row one dataset.
        title: accepted but never drawn — NOTE(review): presumably meant for
            plt.title; confirm before relying on it.
        filename: output path handed to plt.savefig.
    """
    labels = df.columns

    # Calculate angles
    angles = np.linspace(0, 2 * np.pi, len(labels), endpoint=False).tolist()
    angles += angles[:1]  # repeat the first angle so each polygon closes

    # Initialize radar chart
    fig, ax = plt.subplots(figsize=(10, 6), subplot_kw=dict(polar=True), dpi=200)
    # ax.spines['polar'].set_visible(False)

    # Draw radar chart for each dataset
    for index, row in df.iterrows():
        values = row.tolist()
        values += values[:1]  # close the polygon like the angles
        ax.fill(angles, values, alpha=0.1)
        ax.plot(angles, values, label=index)

        # Add percentage labels next to each data point
        for angle, value in zip(angles, values):
            ax.text(angle, value, '{:.1%}'.format(value), ha='center', va='center', fontsize=7, alpha=0.7)

    # Set labels
    ax.set_yticklabels([])
    ax.set_xticks(angles[:-1])
    ax.set_xticklabels(labels, fontproperties=font)
    ax.spines['polar'].set_visible(False)  # Hide the outermost circle
    ax.grid(False)
    # Hand-drawn dashed grid: rings every 0.2 plus one spoke per axis.
    # NOTE(review): `values` here is whatever the last iterated row left
    # behind — works because all rows share the column count; confirm.
    for j in np.arange(0, 1.2, 0.2):
        ax.plot(angles, len(values) * [j], '-.', lw=0.5, color='black', alpha=0.5)
    for j in range(len(values)):
        ax.plot([angles[j], angles[j]], [0, 1], '-.', lw=0.5, color='black', alpha=0.5)

    # Add title and legend
    plt.legend(loc='upper right', bbox_to_anchor=(0.1, 0.1))

    ax.tick_params(pad=30)
    ax.set_theta_zero_location('N')
    # Save chart to file
    plt.savefig(filename)
|
| 1750 |
+
|
| 1751 |
+
# The function is from https://github.com/intsig-textin/markdown_tester
|
| 1752 |
+
def markdown_to_html(markdown_table):
    """Convert one markdown pipe-table string into an HTML <table> string.

    Assumes row 0 is the header and row 1 the alignment/separator row, so
    data rows start at index 2.
    """
    rows = [row.strip() for row in markdown_table.strip().split('\n')]
    num_columns = len(rows[0].split('|')) - 2  # NOTE(review): computed but never used

    html_table = '<table>\n <thead>\n <tr>\n'

    # Header cells sit between the outer pipes of the first row.
    header_cells = [cell.strip() for cell in rows[0].split('|')[1:-1]]
    for cell in header_cells:
        html_table += f' <th>{cell}</th>\n'
    html_table += ' </tr>\n </thead>\n <tbody>\n'

    # Body rows: skip the header row and the separator row.
    for row in rows[2:]:
        cells = [cell.strip() for cell in row.split('|')[1:-1]]
        html_table += ' <tr>\n'
        for cell in cells:
            html_table += f' <td>{cell}</td>\n'
        html_table += ' </tr>\n'

    html_table += ' </tbody>\n</table>\n'
    return html_table
|
| 1772 |
+
def convert_markdown_to_html(self, markdown_content, md_type):
    """Replace every Markdown table in *markdown_content* with its HTML
    equivalent and normalise the result via the table pipeline.

    ``md_type`` is accepted for interface compatibility but is not used
    in this implementation.
    """
    # Normalise Windows line endings so row matching is predictable.
    markdown_content = markdown_content.replace('\r', '')
    table_pattern = re.compile(r'\|\s*.*?\s*\|\n', re.DOTALL)

    # Swap each detected Markdown table block for an HTML table; replace
    # only the first occurrence each time so positions stay stable.
    for table_text in table_pattern.findall(markdown_content):
        markdown_content = markdown_content.replace(
            table_text, markdown_to_html(table_text), 1)

    return convert_table(replace_table_with_placeholder(markdown_content))
|
| 1785 |
+
def convert_table_str(s):
    """Normalise an HTML table string: drop <table> attributes, demote
    header cells to data cells, and emit one tag (or text run) per line,
    wrapped in blank lines.
    """
    s = re.sub(r'<table.*?>', '<table>', s)
    s = re.sub(r'<th', '<td', s)
    s = re.sub(r'</th>', '</td>', s)

    pieces = ['\n\n']
    buffer = ''
    for ch in s:
        buffer += ch
        # Flush on '>' unless the buffer looks like a cell whose opening
        # tag is immediately followed by a literal '$' (kept intact).
        if ch == '>' and not re.search(r'<td.*?>\$', buffer):
            pieces.append(buffer + '\n')
            buffer = ''
    return ''.join(pieces) + '\n'
|
| 1800 |
+
def merge_table(md):
    """Concatenate the lines of *md* and normalise the result with
    ``convert_table_str``.
    """
    return convert_table_str(''.join(md))
|
| 1805 |
+
def find_md_table_mode(line):
    """Return True when *line* looks like a Markdown table alignment row.

    NOTE(review): because of the lazy quantifiers, ``-*?:`` and ``:-*?``
    both match any line containing a bare ':'; this permissiveness is
    deliberately preserved to keep behavior identical.
    """
    patterns = (r'-*?:', r'---', r':-*?')
    return any(re.search(p, line) for p in patterns)
|
| 1809 |
+
def delete_table_and_body(input_list):
    """Return *input_list* without <table>/<thead>/<tbody> open/close tags."""
    tag_re = re.compile(r'</?t(able|head|body)>')
    return [line for line in input_list if not tag_re.search(line)]
|
| 1815 |
+
def merge_tables(input_str):
    """Merge every <table> block found in *input_str* into one HTML table,
    demoting header cells (<th>) to data cells (<td>).
    """
    # Strip HTML comments first so commented-out tables are ignored.
    cleaned = re.sub(r'<!--[\s\S]*?-->', '', input_str)

    merged_rows = []
    for table_html in re.findall(r'<table>[\s\S]*?</table>', cleaned):
        lines = [
            ln.replace('<th>', '<td>').replace('</th>', '</td>') if '<th>' in ln else ln
            for ln in table_html.split('\n')
        ]
        rows = delete_table_and_body(lines)
        # Keep only tables that still have real content after tag removal.
        if len(rows) > 2:
            merged_rows.extend(rows)

    merged = '<table>\n{}\n</table>'.format('\n'.join(merged_rows))
    return '\n\n' + merged + '\n\n'
|
| 1837 |
+
|
| 1838 |
+
def replace_table_with_placeholder(input_string):
    """Scan *input_string* line by line, detect runs of consecutive HTML
    <table> blocks, and replace each run with a single merged table
    produced by ``merge_tables``.  Non-table lines pass through unchanged.

    The scan works with a one-line delay (``last_line``), so the decision
    about a line can look ahead at the line that follows it.
    """
    lines = input_string.split('\n')
    output_lines = []

    in_table_block = False   # True while inside a run of <table> blocks
    temp_block = ""          # accumulated text of the current table run
    last_line = ""           # previous line (one-line lookbehind buffer)

    # Leftovers from a disabled "original table" code path (kept as-is).
    org_table_list = []
    in_org_table = False

    for idx, line in enumerate(lines):
        # if not in_org_table:
        #     if "<table>" not in last_line and in_table_block == False and temp_block != "":
        #         output_lines.append(merge_tables(temp_block))
        #         temp_block = ""
        if "<table>" in line:
            # if "<table><tr" in line:
            #     org_table_list.append(line)
            #     in_org_table = True
            #     output_lines.append(last_line)
            #     continue
            # else:
            # Entering (or continuing) a table run: buffer the lookbehind line.
            in_table_block = True
            temp_block += last_line
        elif in_table_block:
            # Accumulate lines of the run, skipping Markdown alignment rows
            # and </thead> markers which would confuse the merge step.
            if not find_md_table_mode(last_line) and "</thead>" not in last_line:
                temp_block += "\n" + last_line
            if "</table>" in last_line:
                # The run ends only if the *next* line does not open a new table.
                if "<table>" not in line:
                    in_table_block = False
                    output_lines.append(merge_tables(temp_block))
                    temp_block = ""
        else:
            # Plain text outside any table run.
            output_lines.append(last_line)

        last_line = line
        # else:
        #     org_table_list.append(line)
        #     if "</table" in line:
        #         in_org_table = False
        #         last_line = merge_table(org_table_list)
        #         org_table_list = []

    # Flush the final buffered line (and any unterminated table run).
    if last_line:
        if in_table_block or "</table>" in last_line:
            temp_block += "\n" + last_line
            output_lines.append(merge_tables(temp_block))
        else:
            output_lines.append(last_line)
            # if "</table>" in last_line:
            #     output_lines.append(merge_tables(temp_block))

    return '\n'.join(output_lines)
|
| 1892 |
+
|
| 1893 |
+
def convert_table(input_str):
    """Expand bare <table> and <td> tags into the fully-attributed form
    expected by the downstream TEDS evaluator.
    """
    replacements = (
        ('<table>', '<table border="1" >'),
        ('<td>', '<td colspan="1" rowspan="1">'),
    )
    result = input_str
    for old, new in replacements:
        result = result.replace(old, new)
    return result
|
| 1901 |
+
|
| 1902 |
+
def convert_markdown_to_html(markdown_content):
    """Convert every Markdown table in *markdown_content* to HTML and
    normalise the document through the table-merging pipeline.
    """
    # Drop carriage returns and guarantee a trailing newline so the table
    # regex can match the final row.
    markdown_content = markdown_content.replace('\r', '') + '\n'
    table_pattern = re.compile(r'\|\s*.*?\s*\|\n', re.DOTALL)

    for table_text in table_pattern.findall(markdown_content):
        # Replace only the first occurrence so duplicates stay in order.
        markdown_content = markdown_content.replace(
            table_text, markdown_to_html(table_text), 1)

    return convert_table(replace_table_with_placeholder(markdown_content))
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/Ocrbench_v2/IoUscore_metric.py
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa
|
| 2 |
+
import os
|
| 3 |
+
import re
|
| 4 |
+
import ast
|
| 5 |
+
import ipdb
|
| 6 |
+
from vlmeval.dataset.utils.Ocrbench_v2.vqa_metric import vqa_evaluation
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def calculate_iou(box1, box2):
    """Compute the Intersection-over-Union of two axis-aligned boxes.

    Args:
        box1: Sequence of four int-convertible values ``[x1, y1, x2, y2]``.
        box2: Same format as ``box1``.

    Returns:
        float | int: IoU in ``[0, 1]``; 0 when either box is malformed or
        the union area is zero.
    """
    try:
        box1 = [int(coordinate) for coordinate in box1]
        box2 = [int(coordinate) for coordinate in box2]
    except (ValueError, TypeError):
        # Fixed: was a bare ``except:``.  Malformed input (non-numeric
        # entries or a non-iterable box) scores zero instead of raising.
        return 0

    # Intersection rectangle; empty intersections clamp to zero area.
    x1_inter = max(box1[0], box2[0])
    y1_inter = max(box1[1], box2[1])
    x2_inter = min(box1[2], box2[2])
    y2_inter = min(box1[3], box2[3])
    inter_area = max(0, x2_inter - x1_inter) * max(0, y2_inter - y1_inter)

    box1_area = (box1[2] - box1[0]) * (box1[3] - box1[1])
    box2_area = (box2[2] - box2[0]) * (box2[3] - box2[1])
    union_area = box1_area + box2_area - inter_area

    iou = inter_area / union_area if union_area != 0 else 0
    return iou
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def vqa_with_position_evaluation(predict, img_metas):
    """Score a VQA-with-grounding prediction: the average of the answer
    match (via ``vqa_evaluation``) and the IoU of the predicted bounding
    box against the ground truth.  Missing fields contribute zero.
    """
    answer_score = 0.0
    bbox_score = 0.0

    if "answer" in predict.keys():
        answer_score = vqa_evaluation(predict["answer"], img_metas["answers"])

    if "bbox" in predict.keys():
        reference_bbox = img_metas["bbox"]
        try:
            parsed_bbox = ast.literal_eval(predict["bbox"])
            bbox_score = calculate_iou(parsed_bbox, reference_bbox)
        except:
            # Unparseable bbox strings score zero (broad catch preserved).
            bbox_score = 0

    return 0.5 * answer_score + 0.5 * bbox_score
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def extract_coordinates(text):
    """Extract the last valid bounding box ``[x1, y1, x2, y2]`` in *text*.

    Boxes may be written as ``(x1, y1, x2, y2)`` or ``[x1, y1, x2, y2]``
    and every coordinate must lie in ``[0, 1000]``.  A repeated box is
    moved to the back of the candidate list so the most recent mention
    wins.  Returns ``None`` when no valid box is found.
    """
    box_pattern = r'[\(\[]\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*[\)\]]'

    ordered = []
    seen = set()
    for m in re.finditer(box_pattern, text):
        candidate = tuple(int(g) for g in m.groups())
        if not all(0 <= v <= 1000 for v in candidate):
            continue
        if candidate in seen:
            # Re-mention: drop the earlier position, keep only the latest.
            ordered = [c for c in ordered if c != candidate]
        ordered.append(candidate)
        seen.add(candidate)

    if not ordered:
        return None
    return list(ordered[-1])
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
if __name__ == "__main__":

    # Demo 1: text grounding — IoU between two hand-picked boxes.
    print("Example for Text Grounding task.")
    box1 = [50, 50, 150, 150]
    box2 = [60, 60, 140, 140]
    iou_score = calculate_iou(box1, box2)
    print(f"IoU score: {iou_score}")

    # Demo 2: VQA with position — answer-text score plus box IoU.
    print("Example for VQA with position task.")
    pred = {"content": "The content is Hello Buddies", "bbox": box1}
    gt = {"content": "Hello Buddies", "bbox": box2}

    vqa_score = vqa_evaluation(pred["content"], gt["content"])
    iou_score = calculate_iou(pred["bbox"], gt["bbox"])

    print(f"VQA score: {vqa_score}")
    print(f"IoU score: {iou_score}")
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/Ocrbench_v2/TEDS_metric.py
ADDED
|
@@ -0,0 +1,930 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa
|
| 2 |
+
# Copyright 2020 IBM
|
| 3 |
+
# Author: peter.zhong@au1.ibm.com
|
| 4 |
+
#
|
| 5 |
+
# This is free software; you can redistribute it and/or modify
|
| 6 |
+
# it under the terms of the Apache 2.0 License.
|
| 7 |
+
#
|
| 8 |
+
# This software is distributed in the hope that it will be useful,
|
| 9 |
+
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
| 10 |
+
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
| 11 |
+
# Apache 2.0 License for more details.
|
| 12 |
+
|
| 13 |
+
import re
|
| 14 |
+
import ast
|
| 15 |
+
import json
|
| 16 |
+
import ipdb
|
| 17 |
+
import distance
|
| 18 |
+
from apted import APTED, Config
|
| 19 |
+
from itertools import product
|
| 20 |
+
from apted.helpers import Tree
|
| 21 |
+
from lxml import etree, html
|
| 22 |
+
from collections import deque
|
| 23 |
+
from vlmeval.dataset.utils.Ocrbench_v2.parallel import parallel_process
|
| 24 |
+
from tqdm import tqdm
|
| 25 |
+
from zss import simple_distance, Node
|
| 26 |
+
import string
|
| 27 |
+
from typing import Any, Callable, Optional, Sequence
|
| 28 |
+
import numpy as np
|
| 29 |
+
import Levenshtein
|
| 30 |
+
import editdistance
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class TableTree(Tree):
    """Tree node describing one HTML table element for APTED edit distance."""

    def __init__(self, tag, colspan=None, rowspan=None, content=None, *children):
        self.tag = tag
        self.colspan = colspan
        self.rowspan = rowspan
        self.content = content
        self.children = list(children)

    def bracket(self):
        """Show tree using brackets notation"""
        if self.tag == 'td':
            node_repr = (
                '"tag": %s, "colspan": %d, "rowspan": %d, "text": %s'
                % (self.tag, self.colspan, self.rowspan, self.content)
            )
        else:
            node_repr = '"tag": %s' % self.tag
        for child in self.children:
            node_repr += child.bracket()
        return "{{{}}}".format(node_repr)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class CustomConfig(Config):
    """APTED configuration comparing table nodes by tag, span and cell text."""

    @staticmethod
    def maximum(*sequences):
        """Get maximum possible value"""
        return max(len(seq) for seq in sequences)

    def normalized_distance(self, *sequences):
        """Get distance from 0 to 1"""
        return float(distance.levenshtein(*sequences)) / self.maximum(*sequences)

    def rename(self, node1, node2):
        """Compares attributes of trees"""
        attrs_differ = (
            node1.tag != node2.tag
            or node1.colspan != node2.colspan
            or node1.rowspan != node2.rowspan
        )
        if attrs_differ:
            return 1.
        # Only cells carry text; empty-vs-empty counts as a perfect match.
        if node1.tag == 'td' and (node1.content or node2.content):
            return self.normalized_distance(node1.content, node2.content)
        return 0.
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
class TEDS(object):
    '''Tree Edit Distance based Similarity for table recognition.

    Args:
        structure_only: when True, ignore cell text and compare only the
            table structure.
        n_jobs: number of parallel workers for ``batch_evaluate`` (>= 1).
        ignore_nodes: optional iterable of tag names stripped from both
            trees before comparison.
    '''
    def __init__(self, structure_only=False, n_jobs=1, ignore_nodes=None):
        # Fixed: message previously read "greather than 1" although the
        # check accepts any integer >= 1.
        assert isinstance(n_jobs, int) and (n_jobs >= 1), \
            'n_jobs must be an integer greater than or equal to 1'
        self.structure_only = structure_only
        self.n_jobs = n_jobs
        self.ignore_nodes = ignore_nodes
        self.__tokens__ = []  # scratch buffer filled by tokenize()

    def tokenize(self, node):
        ''' Tokenizes table cells into self.__tokens__ (tags + characters).
        '''
        self.__tokens__.append('<%s>' % node.tag)
        if node.text is not None:
            self.__tokens__ += list(node.text)
        for n in node.getchildren():
            self.tokenize(n)
        if node.tag != 'unk':
            self.__tokens__.append('</%s>' % node.tag)
        # Tail text (after the closing tag) belongs to the enclosing cell.
        if node.tag != 'td' and node.tail is not None:
            self.__tokens__ += list(node.tail)

    def load_html_tree(self, node, parent=None):
        ''' Converts an lxml HTML tree to the TableTree format required by apted.
        '''
        # NOTE: removed a dead ``global __tokens__`` declaration; the code
        # only ever uses the instance attribute self.__tokens__.
        if node.tag == 'td':
            if self.structure_only:
                cell = []
            else:
                self.__tokens__ = []
                self.tokenize(node)
                # Drop the enclosing <td>/</td> tokens, keep the content.
                cell = self.__tokens__[1:-1].copy()
            new_node = TableTree(node.tag,
                                 int(node.attrib.get('colspan', '1')),
                                 int(node.attrib.get('rowspan', '1')),
                                 cell, *deque())
        else:
            new_node = TableTree(node.tag, None, None, None, *deque())
        if parent is not None:
            parent.children.append(new_node)
        if node.tag != 'td':
            for n in node.getchildren():
                self.load_html_tree(n, new_node)
        if parent is None:
            return new_node

    def evaluate(self, pred, true):
        ''' Computes TEDS score between the prediction and the ground truth of a
            given sample.

            Returns a float in [0, 1]; 0.0 when either input is empty or
            does not contain a <table>.
        '''
        if (not pred) or (not true):
            return 0.0
        parser = html.HTMLParser(remove_comments=True, encoding='utf-8')
        pred = html.fromstring(pred, parser=parser)
        true = html.fromstring(true, parser=parser)
        if pred.xpath('body/table') and true.xpath('body/table'):
            pred = pred.xpath('body/table')[0]
            true = true.xpath('body/table')[0]
            if self.ignore_nodes:
                etree.strip_tags(pred, *self.ignore_nodes)
                etree.strip_tags(true, *self.ignore_nodes)
            # Normalise the raw edit distance by the larger node count.
            n_nodes_pred = len(pred.xpath(".//*"))
            n_nodes_true = len(true.xpath(".//*"))
            n_nodes = max(n_nodes_pred, n_nodes_true)
            tree_pred = self.load_html_tree(pred)
            tree_true = self.load_html_tree(true)
            distance = APTED(tree_pred, tree_true, CustomConfig()).compute_edit_distance()
            return 1.0 - (float(distance) / n_nodes)
        else:
            return 0.0

    def batch_evaluate(self, pred_json, true_json):
        ''' Computes TEDS score between the prediction and the ground truth of
            a batch of samples
            @params pred_json: {'FILENAME': 'HTML CODE', ...}
            @params true_json: {'FILENAME': {'html': 'HTML CODE'}, ...}
            @output: {'FILENAME': 'TEDS SCORE', ...}
        '''
        samples = true_json.keys()
        if self.n_jobs == 1:
            scores = [self.evaluate(pred_json.get(filename, ''), true_json[filename]['html']) for filename in tqdm(samples)]
        else:
            # NOTE(review): the parallel path passes true_json[filename]
            # directly (no ['html'] lookup) — the two branches expect
            # different ground-truth layouts; confirm against callers.
            #inputs = [{'pred': pred_json.get(filename, ''), 'true': true_json[filename]['html']} for filename in samples]
            inputs = [{'pred': pred_json.get(filename, ''), 'true': true_json[filename]} for filename in samples]
            scores = parallel_process(inputs, self.evaluate, use_kwargs=True, n_jobs=self.n_jobs, front_num=1)
        scores = dict(zip(samples, scores))
        return scores
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
def convert_table_to_html_str(table_row_list=None):
    """
    Given a list of table rows (each a list of cell strings), build the
    corresponding HTML string used to compute the TEDS score.

    We use the official code of PubTabNet to compute the TEDS score; it
    does not consider the '<th>' label, so every cell is emitted as
    '<td>'.  Extra '\\n' characters are removed because they would
    influence the TEDS score.

    Fixed: the signature previously used a mutable default argument
    (``table_row_list=[]``); ``None`` is now the sentinel.
    """
    if table_row_list is None:
        table_row_list = []
    html_table_str = "<html><body><table>" + '\n'
    for data_row in table_row_list:
        html_table_str += "<tr>"
        for cell_str in data_row:
            html_table_str += f"<td>{cell_str}</td>"
        html_table_str += "</tr>"
        html_table_str += '\n'
    html_table_str += "</table></body></html>"
    # Newlines were only for readability while building; strip them all.
    html_table_str = html_table_str.replace('\n', '')
    return html_table_str
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
def convert_markdown_table_to_html(markdown_table):
    """
    Converts a markdown table to the corresponding HTML string for TEDS
    computation.

    Fixed: the original used ``.strip('```markdown').strip('```')``, but
    ``str.strip`` treats its argument as a *set of characters*, so it
    could also eat leading/trailing letters ('m', 'a', 'r', 'k', 'd',
    'o', 'w', 'n') belonging to the table itself.  Code fences are now
    removed structurally instead.
    """
    text = markdown_table.strip()
    # Drop an opening code-fence line such as ``` or ```markdown.
    if text.startswith('```'):
        text = text.split('\n', 1)[1] if '\n' in text else ''
    # Drop a trailing closing code fence.
    if text.rstrip().endswith('```'):
        text = text.rstrip()[:-3]
    text = text.strip()

    row_str_list = text.split('\n')
    # Keep the header row and the data rows; skip the '---' separator row.
    valid_row_str_list = [row_str_list[0]] + row_str_list[2:]
    table_rows = []
    for row_str in valid_row_str_list:
        one_row = []
        for cell in row_str.strip().split('|')[1:-1]:
            # Preserve intentionally blank (all-space) cells as one space.
            if set(cell) != set(' '):
                one_row.append(cell.strip())
            else:
                one_row.append(' ')
        table_rows.append(one_row)
    # Build the html string based on the collected table rows.
    html_str = convert_table_to_html_str(table_rows)
    return html_str
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
def dict_to_html(data):
    """Render a flat dict as a two-column HTML table (key | value).

    NOTE(review): ``' '.join(value)`` space-separates the *characters* of
    the value string (e.g. "ab" -> "a b"); this quirk is preserved — the
    TEDS tokenizer compares cell text character by character.
    """
    rows = []
    for key, value in data.items():
        if not isinstance(value, str):
            value = str(value)
        spaced_value = ' '.join(value)
        rows.append(f" <tr><td>{key}</td><td>{spaced_value}</td></tr>\n")
    return "<html><body><table>\n" + ''.join(rows) + "</table></body></html>"
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
def convert_str_to_dict(predict_str: str):
    """
    Parses the 'predict' string and returns a dictionary.
    Missing or unparseable content is handled gracefully.

    Parameters:
    - predict_str (str): The prediction string containing the output dict.

    Returns:
    - dict: A dictionary extracted from the predict string; keys and
      values are coerced to stripped strings.  {} when nothing parseable
      is found.
    """
    # Remove code fences like ```python\n...\n```
    code_fence_pattern = r'```(?:python|json)?\n(.*?)\n```'
    match = re.search(code_fence_pattern, predict_str, re.DOTALL | re.IGNORECASE)
    if match:
        content = match.group(1)
    else:
        content = predict_str.strip()

    data = {}
    success = False

    # 1) try parsing with JSON
    try:
        data = json.loads(content)
        success = True
    except json.JSONDecodeError:
        pass

    # 2) try parsing with ast.literal_eval
    if not success:
        try:
            data = ast.literal_eval(content)
            if isinstance(data, dict):
                success = True
        except (ValueError, SyntaxError):
            pass

    # 3) fall back to scraping key/value pairs with a regex
    if not success:
        key_value_pattern = r'["\']?([\w\s]+)["\']?\s*[:=]\s*["\']?([^\n,"\'{}]+)["\']?'
        matches = re.findall(key_value_pattern, content)
        # Fixed: was a bare ``except:``; only shape errors (data not being
        # an assignable mapping) are expected here.
        try:
            for key, value in matches:
                data[key.strip()] = value.strip()
        except (TypeError, ValueError, AttributeError):
            return {}

    if not data:
        return {}

    # Normalise keys/values to stripped strings; non-dict parses bail out.
    # Fixed: was a bare ``except:``.
    try:
        result = {k.strip(): str(v).strip() for k, v in data.items()}
    except (AttributeError, TypeError):
        return {}
    return result
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
def convert_str_to_multi_dict(predict_str: str):
    """
    Parses the 'predict' string into a (possibly nested) dictionary.

    Code fences are stripped (the longest fenced section wins), a leading
    ``name =`` assignment and ``#`` comments are removed, and anything
    after the final ``}`` is discarded before ``ast.literal_eval`` runs.
    Returns {} when no dictionary can be recovered.
    """
    fence_re = r'```(?:python|json)?\n(.*?)\n```'
    fenced_sections = re.findall(fence_re, predict_str, re.DOTALL | re.IGNORECASE)
    content = max(fenced_sections, key=len) if fenced_sections else predict_str.strip()

    # Drop a leading "variable = " assignment, if any.
    content = re.sub(r'^\s*\w+\s*=\s*', '', content.strip(), count=1)
    # Drop python-style comments.
    content = re.sub(r'#.*', '', content)

    # Truncate after the last closing brace so trailing prose is ignored.
    tail = content.rfind('}')
    if tail != -1:
        content = content[:tail + 1]

    try:
        parsed = ast.literal_eval(content)
    except (ValueError, SyntaxError, TypeError):
        return {}
    if not isinstance(parsed, dict):
        return {}

    def _walk(obj):
        # Recursively rebuild nested dicts/lists, leaving leaves untouched.
        if isinstance(obj, dict):
            return {k: _walk(v) for k, v in obj.items()}
        if isinstance(obj, list):
            return [_walk(e) for e in obj]
        return obj

    return _walk(parsed)
|
| 336 |
+
|
| 337 |
+
|
| 338 |
+
def generate_combinations(input_dict):
    """
    Generate every combination of values from a dict whose values may be
    lists of alternatives.

    Accepts either a dict or a string encoding of one (JSON or Python
    literal).  Returns a list of dicts — one per element of the Cartesian
    product of the value lists — or {} when a string input cannot be
    parsed.  Raises ValueError when a string parses but is not a dict.
    """
    parsed = input_dict
    if not isinstance(parsed, dict):
        # String input: peel surrounding quotes, then try JSON and
        # Python-literal parsing in turn.
        parsed = parsed.strip('"')
        try:
            parsed = json.loads(parsed)
        except json.JSONDecodeError:
            try:
                parsed = ast.literal_eval(parsed)
                if not isinstance(parsed, dict):
                    # Double-encoded literal: evaluate a second time.
                    parsed = ast.literal_eval(parsed)
            except (ValueError, SyntaxError):
                print(f"Unable to parse 'answers' field: {parsed}")
                return {}

        # Ensure the parsed result is a dictionary.
        if not isinstance(parsed, dict):
            print("Parsed 'answers' is still not a dictionary.")
            raise ValueError("Input could not be parsed into a dictionary.")

        keys = list(parsed.keys())
        value_lists = []
        for key in keys:
            value = parsed[key]
            # Wrap scalars so every entry participates in the product.
            value_lists.append(value if isinstance(value, list) else [value])

        combos = list(product(*value_lists))
        return [dict(zip(keys, values)) for values in combos]

    # Dict input: values are assumed to already be lists of alternatives.
    keys = list(input_dict.keys())
    value_lists = [input_dict[key] for key in keys]
    combos = list(product(*value_lists))
    return [dict(zip(keys, values)) for values in combos]
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
def compute_f1_score(preds, gts, ignores=None):
    """Compute the average F1-score for a KIE task between prediction and ground truth.

    Each key present in either dict is scored independently: after
    normalisation, an exact string match counts as precision = recall = 1,
    anything else (missing key on either side, or mismatch) as 0, so the
    per-key F1 is always 1.0 or 0.0.

    Args:
        preds (dict): Predicted key -> value pairs (values are strings).
        gts (dict): Ground-truth key -> value pairs.
        ignores (list, optional): Keys to exclude from evaluation.
            Defaults to no exclusions.

    Returns:
        float | int: Mean F1 over all evaluated keys; ``0`` when no key
        was evaluated at all.
    """
    # Avoid the mutable-default-argument pitfall of the original `ignores=[]`.
    if ignores is None:
        ignores = []

    def _normalize(value):
        # Lower-case and remove all spaces/newlines so purely cosmetic
        # formatting differences do not count as mismatches.
        return value.lower().strip().replace("\n", " ").replace(" ", "")

    keys = set(preds.keys()).union(set(gts.keys())) - set(ignores)
    f1_scores = {}

    for key in keys:
        pred_value = preds.get(key, None)
        gt_value = gts.get(key, None)

        # Truthiness check (not `is not None`) mirrors the original code:
        # empty strings are deliberately left un-normalised.
        if pred_value:
            pred_value = _normalize(pred_value)
        if gt_value:
            gt_value = _normalize(gt_value)

        if pred_value is None and gt_value is None:
            # Key absent on both sides: nothing to score.
            continue

        if pred_value is not None and gt_value is not None and pred_value == gt_value:
            # True positive.
            precision = recall = 1.0
        else:
            # Missing on one side (false negative / false positive) or mismatch.
            precision = recall = 0.0

        f1_scores[key] = (2 * precision * recall / (precision + recall)
                          if (precision + recall) > 0 else 0.0)

    if len(f1_scores) == 0:
        return 0
    average_f1 = sum(f1_scores.values()) / len(f1_scores)

    return average_f1
|
| 442 |
+
|
| 443 |
+
|
| 444 |
+
def pre_clean(text):
    """Normalise one line of model output before tree comparison.

    Removes special tokens, re-joins wordpiece fragments, tightens spacing
    around LaTeX constructs, then flattens newlines and emphasis markers
    into plain spaces.
    """
    # Ordered (pattern, replacement) passes; order matters because later
    # patterns act on the output of earlier ones.
    passes = [
        (r'<bos>|<eos>|<pad>|<unk>', ''),
        (r'\s##(\S)', r'\1'),
        (r'\\\s', r'\\'),
        (r'\s\*\s\*\s', r'**'),
        (r'{\s', r'{'),
        (r'\s}', r'}'),
        # The `\s}` pass is intentionally kept twice, as in the original
        # implementation: a second sweep collapses whitespace that the
        # first non-overlapping pass leaves behind (e.g. "a  }").
        (r'\s}', r'}'),
        (r'\\begin\s', r'\\begin'),
        (r'\\end\s', r'\\end'),
        (r'\\end{table}', r'\\end{table} \n\n'),
    ]
    for pattern, replacement in passes:
        text = re.sub(pattern, replacement, text)
    text = text.replace('\n', ' ')
    text = text.replace('*', ' ')
    text = text.replace('_', ' ')
    return text
|
| 459 |
+
|
| 460 |
+
|
| 461 |
+
def get_tree(input_str):
    """Build a zss tree from markdown-like text.

    Lines starting with '#' become heading nodes attached to ROOT; every
    other line is attached under the most recently seen heading, or under
    the TITLE placeholder when no heading has appeared yet.
    """
    tree = Node('ROOT').addkid(Node('TITLE'))

    cleaned_lines = [pre_clean(raw) for raw in input_str.split("\n")]
    last_title = ''
    for line in cleaned_lines:
        if line.startswith('#'):
            # Strip every '#' (not just the leading run) to get the label.
            line = line.replace('#', '')
            tree.get('ROOT').addkid(Node(line))
            last_title = line
        else:
            parent_label = 'TITLE' if last_title == '' else last_title
            tree.get(parent_label).addkid(Node(line))
    return tree
|
| 481 |
+
|
| 482 |
+
def STEDS(pred_tree, ref_tree):
    """Structure-only tree edit distance similarity between two zss trees.

    Substituting one label for another costs 1 only when either label is
    blank (no whitespace-separated tokens); otherwise content differences
    are free, so the score reflects tree structure rather than text.
    """
    def label_cost(pred_label, ref_label):
        blank = len(pred_label.split()) == 0 or len(ref_label.split()) == 0
        return 1 if blank else 0

    total_distance = simple_distance(pred_tree, ref_tree, label_dist=label_cost)
    node_count = max(len(list(pred_tree.iter())), len(list(ref_tree.iter())))
    return 1 - total_distance / node_count
|
| 491 |
+
|
| 492 |
+
|
| 493 |
+
def doc_parsing_evaluation(pred, gt):
    """Score a document-parsing prediction against ground truth with STEDS.

    Returns 0 immediately when the prediction is not a string (e.g. a
    failed generation), otherwise the STEDS similarity of the two
    heading trees.
    """
    if not isinstance(pred, str):
        return 0
    return STEDS(get_tree(pred), get_tree(gt))
|
| 502 |
+
|
| 503 |
+
|
| 504 |
+
def wrap_html_table(html_table):
    """
    The PubTabNet TEDS computation requires the input HTML table to carry
    <html>, <body> and <table> tags; add whichever of them are missing.
    """
    html_table = html_table.replace('\n', '')

    has_open = "<table" in html_table
    has_close = "</table>" in html_table
    # Complete the <table> pair; when both halves exist, leave it alone.
    if has_open and not has_close:
        html_table += "</table>"
    elif has_close and not has_open:
        html_table = "<table>" + html_table
    elif not has_open and not has_close:
        html_table = "<table>" + html_table + "</table>"

    # Wrap with <body> and <html> when absent.
    if '<body>' not in html_table:
        html_table = '<body>' + html_table + '</body>'
    if '<html>' not in html_table:
        html_table = '<html>' + html_table + '</html>'
    return html_table
|
| 525 |
+
|
| 526 |
+
|
| 527 |
+
def get_anls(s1, s2):
    """Return the ANLS (normalised Levenshtein similarity) of two values.

    Strings are compared case-insensitively; inputs without `.lower()`
    are compared as-is. Identical values score 1.0.
    """
    try:
        s1 = s1.lower()
        s2 = s2.lower()
    except:
        # Non-string inputs (e.g. numbers): keep them unchanged.
        pass
    if s1 == s2:
        return 1.0
    # 1 - normalised edit distance; max(len) > 0 here since equal empties
    # were already handled above.
    similarity = 1 - editdistance.eval(s1, s2) / max(len(s1), len(s2))
    return similarity
|
| 538 |
+
|
| 539 |
+
|
| 540 |
+
def ocr_eval(references,predictions):
    """Average ANLS score of predictions against references for OCR items.

    Each element of `references`/`predictions` may be a single string or a
    list of strings; singletons are wrapped into one-element lists. Items
    whose effective reference count drops to zero (all refs 'None') are
    excluded from the average. Returns the sentinel 9999 when EVERY item
    was excluded; otherwise the mean score rounded to 5 decimals.

    NOTE(review): predictions is indexed by the reference index, so it is
    assumed to be at least as long as references — verify at the caller.
    """
    socre_=0.0           # running sum of per-item scores (typo kept: public behavior unchanged)
    None_num=0           # number of items excluded because temp_num hit 0
    for idx,ref_value in enumerate(references):
        pred_value = predictions[idx]
        pred_values, ref_values = [], []
        # Normalise both sides to lists of strings.
        if isinstance(pred_value, str):
            pred_values.append(pred_value)
        else:
            pred_values = pred_value
        if isinstance(ref_value, str):
            ref_values.append(ref_value)
        else:
            ref_values = ref_value

        temp_score = 0.0
        temp_num = len(ref_values)

        for tmpidx, tmpref in enumerate(ref_values):
            # Fall back to the first prediction when there are fewer
            # predictions than references.
            tmppred = pred_values[tmpidx] if tmpidx < len(pred_values) else pred_values[0]
            if len(pred_values) == 1 and tmppred != "None" and "None" not in ref_values: # pred 1, and not None
                # Single non-None prediction: keep only its BEST match over
                # all references (max, not sum) — still divided by temp_num below.
                temp_score = max(temp_score, get_anls(tmppred, tmpref))
                temp_num = len(ref_values)
            else:
                if tmppred=='None' and tmpref!='None':
                    # Predicted "nothing" where something was expected: zero credit.
                    temp_score += 0.0
                elif tmpref=='None':
                    # A 'None' reference does not count toward the denominator.
                    temp_num -= 1
                else:
                    temp_score += get_anls(tmppred, tmpref)
        if temp_num == 0:
            # All references were 'None': exclude this item from the average.
            ocr_score = 0.0
            None_num += 1
        else:
            ocr_score = temp_score / (temp_num)
        socre_ += ocr_score
    if None_num == len(references):
        # Sentinel: no item contributed a score at all.
        return 9999
    else:
        return round(socre_ / (len(references)-None_num), 5)
|
| 580 |
+
|
| 581 |
+
|
| 582 |
+
def csv_eval(predictions,references,easy, pred_type='json'):
    """Chart-extraction (SCRM-style) evaluation of predictions vs references.

    Predictions/references are converted to (entity, attribute, value)
    triples — from dicts when pred_type == 'json', otherwise from CSV text —
    and matched per item with word/number tolerances. Average precision is
    computed over similarity thresholds 0.5..0.95 at three tolerance levels.

    Args:
        predictions: list of prediction dicts (pred_type='json') or CSV strings.
        references: list of ground-truth dicts.
        easy (int): 1 selects the tighter numeric tolerances in get_ap.
        pred_type (str): 'json' or CSV-style parsing for predictions.

    Returns:
        13-tuple: (em, map_strict, map_slight, map_high,
                   ap_50/75/90 for strict, slight and high tolerance).

    NOTE(review): relies on module-level `re`, `np` (numpy) and the
    third-party `Levenshtein` package. The default separator/delimiter are
    the two-character literals '\\t'/'\\n' (backslash + letter), not real
    tab/newline characters — presumably matching the model output format;
    verify against the dataset.
    """
    predictions = predictions   # no-op kept from the original code
    labels = references
    def is_int(val):
        # True when val parses as an int.
        try:
            int(val)
            return True
        except ValueError:
            return False

    def is_float(val):
        # True when val parses as a float (also true for float inputs).
        try:
            float(val)
            return True
        except ValueError:
            return False

    def convert_dict_to_list(data):
        """
        Convert a dictionary to a list of tuples, handling both simple and nested dictionaries.

        Args:
            data (dict): The input dictionary, which might be nested or simple.

        Returns:
            list: A list of tuples generated from the input dictionary.
        """
        # print(data)
        converted_list = []
        for key, value in data.items():
            # Check if the value is a dictionary (indicating a nested structure)
            if isinstance(value, dict):
                # Handle nested dictionary; values are stripped to digits/./-
                for subkey, subvalue in value.items():
                    # converted_list.append((key, subkey, subvalue))
                    converted_list.append((key, subkey, re.sub(r'[^\d.-]', '', str(subvalue))))

            else:
                # Handle simple key-value pair under the placeholder attribute "value"
                # converted_list.append((key, "value", value))
                converted_list.append((key, "value", re.sub(r'[^\d.-]', '', str(value))))
        return converted_list


    def csv2triples(csv, separator='\\t', delimiter='\\n'):
        # Parse CSV text assuming the first line is a header row.
        lines = csv.strip().split(delimiter)
        header = lines[0].split(separator)
        triples = []
        for line in lines[1:]:
            if not line:
                continue
            values = line.split(separator)
            entity = values[0]
            for i in range(1, len(values)):
                if i >= len(header):
                    break
                #---------------------------------------------------------
                temp = [entity.strip(), header[i].strip()]
                # Drop a trailing ':' from entity/header labels.
                temp = [x if len(x)==0 or x[-1] != ':' else x[:-1] for x in temp]
                value = values[i].strip()
                value = re.sub(r'[^\d.-]', '', str(value))
                # value = value.replace("%","")
                # value = value.replace("$","")
                triples.append((temp[0], temp[1], value))
                #---------------------------------------------------------
        return triples

    def csv2triples_noheader(csv, separator='\\t', delimiter='\\n'):
        # Variant that detects whether the first row is a header (currently
        # unused — see the commented call in get_eval_list below).
        lines = csv.strip().split(delimiter)
        maybe_header = [x.strip() for x in lines[0].split(separator)]
        not_header = False
        if len(maybe_header) > 2:
            # If any non-first cell of row 0 is numeric, treat row 0 as data.
            for c in maybe_header[1:]:
                try:
                    num = float(c)
                    not_header = True
                except:
                    continue
                if not_header:
                    break
        header = None if not_header else maybe_header
        data_start = 0 if not_header and separator in lines[0] else 1
        triples = []
        for line in lines[data_start:]:
            if not line:
                continue
            values = [x.strip() for x in line.split(separator)]
            entity = values[0]
            for i in range(1, len(values)):
                try:
                    temp = [entity if entity[-1]!=':' else entity[:-1], ""]
                except:
                    # entity == "" would raise IndexError on entity[-1]
                    temp = [entity, ""]
                if header is not None:
                    try:
                        this_header = header[i]
                        temp = [entity, this_header]
                        temp = [x if x[-1] != ':' else x[:-1] for x in temp]
                    except:
                        this_header = entity.strip()
                value = values[i].strip()
                value = re.sub(r'[^\d.-]', '', str(value))
                # value = value.replace("%","")
                # value = value.replace("$","")
                triples.append((temp[0], temp[1], value))
            #---------------------------------------------------------
        return triples

    def process_triplets(triplets):
        # Lower-case labels and coerce numeric-looking values to float;
        # malformed (short) triples get the placeholder value "no meaning".
        new_triplets = []
        for triplet in triplets:
            new_triplet = []
            triplet_temp = []
            if len(triplet) > 2:
                if is_int(triplet[2]) or is_float(triplet[2]):
                    triplet_temp = (triplet[0].lower(), triplet[1].lower(), float(triplet[2]))
                else:
                    triplet_temp = (triplet[0].lower(), triplet[1].lower(), triplet[2].lower())
            else:
                triplet_temp = (triplet[0].lower(), triplet[1].lower(), "no meaning")
            new_triplets.append(triplet_temp)
        return new_triplets

    def intersection_with_tolerance(a, b, tol_word, tol_num):
        # Fuzzy intersection: two triples match when their concatenated
        # labels are within tol_word edit distance (or one contains the
        # other) AND their numeric values are within relative tol_num.
        a = set(a)
        b = set(b)
        c = set()
        for elem1 in a:
            for elem2 in b:
                if is_float(elem1[-1]) and is_float(elem2[-1]):
                    if ((Levenshtein.distance(''.join(elem1[:-1]),''.join(elem2[:-1])) <= tol_word) and (abs(elem1[-1] - elem2[-1]) / (abs(elem2[-1])+0.000001) <= tol_num))or \
                    ((''.join(elem1[:-1]) in ''.join(elem2[:-1])) and (abs(elem1[-1] - elem2[-1]) / (abs(elem2[-1])+0.000001) <= tol_num)) or \
                    ((''.join(elem2[:-1]) in ''.join(elem1[:-1])) and (abs(elem1[-1] - elem2[-1]) / (abs(elem2[-1])+0.000001) <= tol_num)):
                        c.add(elem1)
                else:
                    # Non-numeric values: compare whole triples textually.
                    if (Levenshtein.distance(''.join([str(i) for i in elem1]),''.join([str(j) for j in elem2])) <= tol_word):
                        c.add(elem1)
        return list(c)

    def union_with_tolerance(a, b, tol_word, tol_num):
        # Union minus the fuzzy-only matches (exact duplicates are kept once).
        c = set(a) | set(b)
        d = set(a) & set(b)
        e = intersection_with_tolerance(a, b, tol_word, tol_num)
        f = set(e)
        g = c-(f-d)
        return list(g)

    def get_eval_list(pred_csv, label_csv, separator='\\t', delimiter='\\n', tol_word=3, tol_num=0.05, pred_type='json'):

        # Convert predictions to normalised triples.
        if pred_type == 'json':
            pred_triple_list=[]
            for it in pred_csv:
                pred_triple_temp = convert_dict_to_list(it)
                pred_triple_pre = process_triplets(pred_triple_temp)
                pred_triple_list.append(pred_triple_pre)
        else:
            pred_triple_list=[]
            for it in pred_csv:
                pred_triple_temp = csv2triples(it, separator=separator, delimiter=delimiter)
                # pred_triple_temp = csv2triples_noheader(it, separator=separator, delimiter=delimiter)
                pred_triple_pre = process_triplets(pred_triple_temp)
                pred_triple_list.append(pred_triple_pre)

        label_triple_list=[]
        for it in label_csv:
            label_triple_temp = convert_dict_to_list(it)
            label_triple_pre = process_triplets(label_triple_temp)
            label_triple_list.append(label_triple_pre)


        intersection_list=[]
        union_list=[]
        sim_list=[]
        # for each chart image
        for pred,label in zip(pred_triple_list, label_triple_list):
            # Align attribute names pairwise so that ("x","value") style
            # heads compare consistently regardless of order.
            # NOTE(review): the indentation of the statements inside this
            # try was reconstructed; verify against the upstream file.
            for idx in range(len(pred)):
                try:
                    if label[idx][1] == "value" and "value" not in pred[idx][:2]:
                        pred[idx] = (pred[idx][0], "value", pred[idx][2])
                    temp_pred_head = sorted(pred[idx][:2])
                    temp_gt_head = sorted(label[idx][:2])
                    pred[idx] = (temp_pred_head[0], temp_pred_head[1], pred[idx][2])
                    label[idx] = (temp_gt_head[0], temp_gt_head[1], label[idx][2])
                except:
                    continue
            intersection = intersection_with_tolerance(pred, label, tol_word = tol_word, tol_num=tol_num)
            union = union_with_tolerance(pred, label, tol_word = tol_word, tol_num=tol_num)
            sim = len(intersection)/len(union)
            intersection_list.append(intersection)
            union_list.append(union)
            sim_list.append(sim)
        return intersection_list, union_list, sim_list

    def get_ap(predictions, labels, sim_threhold, tolerance, separator='\\t', delimiter='\\n', easy=1):
        # Map a tolerance level name to word/number tolerances.
        # NOTE(review): an unknown `tolerance` value would leave
        # tol_word/tol_num unset and raise NameError below.
        if tolerance == 'strict':
            tol_word=0
            if easy == 1:
                tol_num=0
            else:
                tol_num=0.1

        elif tolerance == 'slight':
            tol_word=2
            if easy == 1:
                tol_num=0.05
            else:
                tol_num=0.3

        elif tolerance == 'high':
            tol_word= 5
            if easy == 1:
                tol_num=0.1
            else:
                tol_num=0.5
        # `pred_type` is captured from the enclosing csv_eval scope.
        intersection_list, union_list, sim_list = get_eval_list(predictions, labels, separator=separator, delimiter=delimiter, tol_word=tol_word, tol_num=tol_num, pred_type=pred_type)
        # Fraction of items whose similarity clears the threshold.
        ap = len([num for num in sim_list if num >= sim_threhold])/(len(sim_list)+1e-16)
        return ap

    map_strict = 0
    map_slight = 0
    map_high = 0
    s="\\t"
    d="\\n"

    # Mean AP over 10 similarity thresholds 0.5, 0.55, ..., 0.95.
    for sim_threhold in np.arange (0.5, 1, 0.05):
        map_temp_strict = get_ap(predictions, labels, sim_threhold=sim_threhold, tolerance='strict', separator=s, delimiter=d, easy=easy)
        map_temp_slight = get_ap(predictions, labels, sim_threhold=sim_threhold, tolerance='slight', separator=s, delimiter=d, easy=easy)
        map_temp_high = get_ap(predictions, labels, sim_threhold=sim_threhold, tolerance='high', separator=s, delimiter=d, easy=easy)
        map_strict += map_temp_strict/10
        map_slight += map_temp_slight/10
        map_high += map_temp_high/10

    # Exact match plus fixed-threshold APs at each tolerance level.
    em = get_ap(predictions, labels, sim_threhold=1, tolerance='strict', separator=s, delimiter=d, easy=easy)
    ap_50_strict = get_ap(predictions, labels, sim_threhold=0.5, tolerance='strict', separator=s, delimiter=d, easy=easy)
    ap_75_strict = get_ap(predictions, labels, sim_threhold=0.75, tolerance='strict', separator=s, delimiter=d, easy=easy)
    ap_90_strict = get_ap(predictions, labels, sim_threhold=0.90, tolerance='strict', separator=s, delimiter=d, easy=easy)
    ap_50_slight = get_ap(predictions, labels, sim_threhold=0.5, tolerance='slight', separator=s, delimiter=d, easy=easy)
    ap_75_slight = get_ap(predictions, labels, sim_threhold=0.75, tolerance='slight', separator=s, delimiter=d, easy=easy)
    ap_90_slight = get_ap(predictions, labels, sim_threhold=0.90, tolerance='slight', separator=s, delimiter=d, easy=easy)
    ap_50_high = get_ap(predictions, labels, sim_threhold=0.5, tolerance='high', separator=s, delimiter=d, easy=easy)
    ap_75_high = get_ap(predictions, labels, sim_threhold=0.75, tolerance='high', separator=s, delimiter=d, easy=easy)
    ap_90_high = get_ap(predictions, labels, sim_threhold=0.90, tolerance='high', separator=s, delimiter=d, easy=easy)


    return em, map_strict, map_slight, map_high, ap_50_strict, ap_75_strict, ap_90_strict, ap_50_slight, ap_75_slight, ap_90_slight, ap_50_high, ap_75_high, ap_90_high
|
| 827 |
+
|
| 828 |
+
def draw_SCRM_table(em, map_strict, map_slight, map_high, ap_50_strict, ap_75_strict, ap_90_strict, ap_50_slight, ap_75_slight, ap_90_slight, ap_50_high, ap_75_high, ap_90_high,title_ocr_socre,source_ocr_socre,x_title_ocr_socre,y_title_ocr_socre,structure_accuracy):
    """Render SCRM chart-evaluation metrics as a fixed-width ASCII table.

    All arguments are floats; they are formatted to 4 decimals inside the
    returned multi-line string. The string is returned, not printed.

    NOTE(review): leading whitespace inside this f-string literal is part
    of the output; the original literal's indentation could not be
    recovered from this rendering — verify the layout against upstream.
    """
    result=f'''
-----------------------------------------------------------\n
| Metrics | Sim_threshold | Tolerance | Value |\n
-----------------------------------------------------------\n
| | | strict | {'%.4f' % map_strict} | \n
| | ----------------------------\n
| mPrecison | 0.5:0.05:0.95 | slight | {'%.4f' % map_slight} |\n
| | ---------------------------\n
| | | high | {'%.4f' % map_high} |\n
-----------------------------------------------------------\n
| | | strict | {'%.4f' % ap_50_strict} |\n
| | ---------------------------\n
| Precison | 0.5 | slight | {'%.4f' % ap_50_slight } |\n
| | ---------------------------\n
| | | high | {'%.4f' % ap_50_high } |\n
-----------------------------------------------------------\n
| | | strict | {'%.4f' % ap_75_strict} |\n
| | ---------------------------\n
| Precison | 0.75 | slight | {'%.4f' % ap_75_slight} |\n
| | ---------------------------\n
| | | high | {'%.4f' % ap_75_high} |\n
-----------------------------------------------------------\n
| | | strict | {'%.4f' % ap_90_strict} |\n
| | ---------------------------\n
| Precison | 0.9 | slight | {'%.4f' % ap_90_slight } |\n
| | ---------------------------\n
| | | high | {'%.4f' % ap_90_high} |\n
-----------------------------------------------------------\n
|Precison(EM) | {'%.4f' % em} |\n
-----------------------------------------------------------\n
|Title(EM) | {'%.4f' % title_ocr_socre} |\n
-----------------------------------------------------------\n
|Source(EM) | {'%.4f' % source_ocr_socre} |\n
-----------------------------------------------------------\n
|X_title(EM) | {'%.4f' % x_title_ocr_socre} |\n
-----------------------------------------------------------\n
|Y_title(EM) | {'%.4f' % y_title_ocr_socre} |\n
-----------------------------------------------------------\n
|structure_acc| {'%.4f' % structure_accuracy} |\n
-----------------------------------------------------------\n


'''
    return result
|
| 874 |
+
|
| 875 |
+
|
| 876 |
+
if __name__ == '__main__':
    # Manual smoke test for the evaluation helpers in this module. Relies
    # on the module-level TEDS, convert_markdown_table_to_html and
    # dict_to_html defined earlier in this file.
    import json   # NOTE(review): imported but unused in this demo block
    import pprint

    # markdown structure for Table Parsing task: the prediction is missing
    # the header row that the ground truth has.
    pred_markdown = "| 1 | august 5 , 1972 | detroit lions | l 23 - 31 | 0 - 1 |\n| 2 | august 12 , 1972 | green bay packers | l 13 - 14 | 0 - 2 |\n| 3 | august 19 , 1972 | cincinnati bengals | w 35 - 17 | 1 - 2 |\n| 4 | august 25 , 1972 | atlanta falcons | w 24 - 10 | 2 - 2 |\n| 5 | august 31 , 1972 | washington redskins | l 24 - 27 | 2 - 3 |\n| 6 | september 10 , 1972 | minnesota vikings | w 21 - 19 | 3 - 3 |"
    true_markdown = "| week | date | opponent | result | record |\n| --- | --- | --- | --- | --- |\n| 1 | august 5 , 1972 | detroit lions | l 23 - 31 | 0 - 1 |\n| 2 | august 12 , 1972 | green bay packers | l 13 - 14 | 0 - 2 |\n| 3 | august 19 , 1972 | cincinnati bengals | w 35 - 17 | 1 - 2 |\n| 4 | august 25 , 1972 | atlanta falcons | w 24 - 10 | 2 - 2 |\n| 5 | august 31 , 1972 | washington redskins | l 24 - 27 | 2 - 3 |\n| 6 | september 10 , 1972 | minnesota vikings | w 21 - 19 | 3 - 3 |"
    teds = TEDS(n_jobs=4)
    # Convert both tables to HTML before scoring with TEDS.
    pred_table_html = convert_markdown_table_to_html(pred_markdown)
    true_table_html = convert_markdown_table_to_html(true_markdown)

    scores = teds.evaluate(pred_table_html, true_table_html)

    pp = pprint.PrettyPrinter()
    pp.pprint(scores)

    # dict structure for Key Information Extraction task: partially wrong
    # company/date values against the ground truth below.
    pred_dict = {
        "company": [
            "OLD TOWN "
        ],
        "date": [
            "2024"
        ],
        "address": [
            "SRI RAMPAI"
        ],
        "total": [
            "30"
        ]
    }
    true_dict = {
        "company": [
            "OLD TOWN KOPITAM SND BHD"
        ],
        "date": [
            "2024/9/27"
        ],
        "address": [
            "SRI RAMPAI"
        ],
        "total": [
            "30"
        ]
    }
    teds = TEDS(n_jobs=4)
    # KIE dicts are rendered as HTML tables so TEDS can compare them.
    pred_dict_html = dict_to_html(pred_dict)
    true_dict_html = dict_to_html(true_dict)
    print(pred_dict_html)
    print(true_dict_html)

    scores = teds.evaluate(pred_dict_html, true_dict_html)

    pp = pprint.PrettyPrinter()
    pp.pprint(scores)
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/Ocrbench_v2/spotting_eval/__pycache__/rrc_evaluation_funcs_1_1.cpython-310.pyc
ADDED
|
Binary file (15.5 kB). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/Ocrbench_v2/spotting_eval/__pycache__/script.cpython-310.pyc
ADDED
|
Binary file (11 kB). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/Ocrbench_v2/spotting_eval/rrc_evaluation_funcs_1_1.py
ADDED
|
@@ -0,0 +1,456 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa
|
| 2 |
+
#!/usr/bin/env python3
|
| 3 |
+
|
| 4 |
+
#File: rrc_evaluation_funcs_1_1.py
|
| 5 |
+
#Version: 1.1
|
| 6 |
+
#Version info: changes for Python 3
|
| 7 |
+
#Date: 2019-12-29
|
| 8 |
+
#Description: File with useful functions to use by the evaluation scripts in the RRC website.
|
| 9 |
+
|
| 10 |
+
import json
|
| 11 |
+
import sys;
|
| 12 |
+
sys.path.append('./')
|
| 13 |
+
import zipfile
|
| 14 |
+
import re
|
| 15 |
+
import os
|
| 16 |
+
import importlib
|
| 17 |
+
|
| 18 |
+
def print_help():
    """Write the command-line usage string to stdout and exit with code 2."""
    usage = 'Usage: python %s.py -g=<gtFile> -s=<submFile> [-o=<outputFolder> -p=<jsonParams>]' % sys.argv[0]
    sys.stdout.write(usage)
    sys.exit(2)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def load_zip_file_keys(file,fileNameRegExp=''):
    """
    Returns an array with the entries of the ZIP file that match with the regular expression.
    The keys are the entry names, or the first capturing group defined in
    fileNameRegExp when the pattern has one.

    Args:
        file: path or file-like object of the ZIP archive.
        fileNameRegExp (str): optional pattern matched (via re.match)
            against each entry name; non-matching entries are skipped.

    Raises:
        Exception: when the archive cannot be opened.
    """
    try:
        archive = zipfile.ZipFile(file, mode='r', allowZip64=True)
    except Exception:
        # Preserve the original generic error contract for callers.
        raise Exception('Error loading the ZIP archive.')

    pairs = []
    for name in archive.namelist():
        keyName = name
        if fileNameRegExp != "":
            m = re.match(fileNameRegExp, name)
            if m is None:
                # Entry does not match the filter: skip it.
                continue
            if len(m.groups()) > 0:
                # Use the first capturing group as the key.
                keyName = m.group(1)
        pairs.append(keyName)

    return pairs
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def load_zip_file(file,fileNameRegExp='',allEntries=False):
    """
    Returns a dict with the contents (filtered by fileNameRegExp) of a ZIP file.
    The keys are the entry names, or the first capturing group defined in
    fileNameRegExp when the pattern has one; values are the raw bytes.

    Args:
        file: path or file-like object of the ZIP archive.
        fileNameRegExp (str): optional pattern matched (via re.match)
            against each entry name.
        allEntries (bool): when True, raise if ANY entry fails the filter.

    Raises:
        Exception: when the archive cannot be opened, or (with
            allEntries=True) when an entry does not match the pattern.
    """
    try:
        archive = zipfile.ZipFile(file, mode='r', allowZip64=True)
    except Exception:
        # Preserve the original generic error contract for callers.
        raise Exception('Error loading the ZIP archive')

    pairs = []
    for name in archive.namelist():
        addFile = True
        keyName = name
        if fileNameRegExp != "":
            m = re.match(fileNameRegExp, name)
            if m is None:
                addFile = False
            elif len(m.groups()) > 0:
                # Use the first capturing group as the key.
                keyName = m.group(1)

        if addFile:
            pairs.append([keyName, archive.read(name)])
        elif allEntries:
            # Strict mode: every entry must pass the filter.
            raise Exception('ZIP entry not valid: %s' % name)

    return dict(pairs)
|
| 82 |
+
|
| 83 |
+
def decode_utf8(raw):
    """
    Returns a Unicode object on success, or None on failure.

    Decodes with 'utf-8-sig' (strips a BOM if present) and errors='replace',
    so byte inputs never raise; None is returned only for non-bytes inputs.
    """
    try:
        return raw.decode('utf-8-sig',errors = 'replace')
    except (AttributeError, TypeError, ValueError):
        # Narrowed from the original bare `except:` — non-bytes objects
        # (no .decode / wrong argument types) still map to None.
        return None
|
| 91 |
+
|
| 92 |
+
def validate_lines_in_file(fileName,file_contents,CRLF=True,LTRB=True,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0):
    """
    Validate every non-empty line of a result/GT file, delegating the
    per-line format check to validate_tl_line. Raises on the first
    invalid line, embedding the file name and offending line in the error.
    """
    decoded = decode_utf8(file_contents)
    if decoded is None:
        raise Exception("The file %s is not UTF-8" %fileName)

    newline = "\r\n" if CRLF else "\n"
    for raw_line in decoded.split(newline):
        stripped = raw_line.replace("\r","").replace("\n","")
        if stripped == "":
            continue
        try:
            validate_tl_line(stripped,LTRB,withTranscription,withConfidence,imWidth,imHeight)
        except Exception as e:
            raise Exception(("Line in sample not valid. Sample: %s Line: %s Error: %s" %(fileName,stripped,str(e))).encode('utf-8', 'replace'))
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def validate_tl_line(line,LTRB=True,withTranscription=True,withConfidence=True,imWidth=0,imHeight=0):
    """
    Raise an exception when *line* does not match the expected format.

    Accepted layouts:
        LTRB=True : xmin,ymin,xmax,ymax[,confidence][,transcription]
        LTRB=False: x1,y1,x2,y2,x3,y3,x4,y4[,confidence][,transcription]
    When imWidth/imHeight are given, all points must lie inside the image.
    """
    # Parsing doubles as validation; the extracted values are discarded.
    get_tl_line_values(line,LTRB,withTranscription,withConfidence,imWidth,imHeight)
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def get_tl_line_values(line,LTRB=True,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0):
    """
    Validate the format of the line. If the line is not valid an exception will be raised.
    If maxWidth and maxHeight are specified, all points must be inside the imgage bounds.
    Posible values are:
    LTRB=True: xmin,ymin,xmax,ymax[,confidence][,transcription]
    LTRB=False: x1,y1,x2,y2,x3,y3,x4,y4[,confidence][,transcription]
    Returns values from a textline. Points , [Confidences], [Transcriptions]
    """
    confidence = 0.0
    transcription = ""
    points = []

    if LTRB:
        numPoints = 4
        if withTranscription and withConfidence:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$',line)
            if m is None:
                # BUGFIX: the original re-ran the identical re.match here before
                # raising — dead code, removed.
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence,transcription")
        elif withConfidence:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*$',line)
            if m is None:
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence")
        elif withTranscription:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,(.*)$',line)
            if m is None:
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,transcription")
        else:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,?\s*$',line)
            if m is None:
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax")

        xmin = int(m.group(1))
        ymin = int(m.group(2))
        xmax = int(m.group(3))
        ymax = int(m.group(4))
        if xmax < xmin:
            raise Exception("Xmax value (%s) not valid (Xmax < Xmin)." %(xmax))
        if ymax < ymin:
            raise Exception("Ymax value (%s) not valid (Ymax < Ymin)." %(ymax))

        points = [ float(m.group(i)) for i in range(1, (numPoints+1) ) ]

        # Bounds are only checked when real image dimensions were supplied.
        if imWidth > 0 and imHeight > 0:
            validate_point_inside_bounds(xmin,ymin,imWidth,imHeight)
            validate_point_inside_bounds(xmax,ymax,imWidth,imHeight)
    else:
        numPoints = 8
        if withTranscription and withConfidence:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$',line)
            if m is None:
                raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence,transcription")
        elif withConfidence:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-1].?[0-9]*)\s*$',line)
            if m is None:
                raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence")
        elif withTranscription:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,(.*)$',line)
            if m is None:
                raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,transcription")
        else:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*$',line)
            if m is None:
                raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4")

        points = [ float(m.group(i)) for i in range(1, (numPoints+1) ) ]

        validate_clockwise_points(points)

        if imWidth > 0 and imHeight > 0:
            validate_point_inside_bounds(points[0],points[1],imWidth,imHeight)
            validate_point_inside_bounds(points[2],points[3],imWidth,imHeight)
            validate_point_inside_bounds(points[4],points[5],imWidth,imHeight)
            validate_point_inside_bounds(points[6],points[7],imWidth,imHeight)

    if withConfidence:
        try:
            confidence = float(m.group(numPoints+1))
        except ValueError:
            raise Exception("Confidence value must be a float")

    if withTranscription:
        # Transcription is the group right after the coordinates (and after the
        # confidence group when it is present).
        posTranscription = numPoints + (2 if withConfidence else 1)
        transcription = m.group(posTranscription)
        m2 = re.match(r'^\s*\"(.*)\"\s*$',transcription)
        if m2 is not None:
            # Transcription with double quotes: extract the value and replace escaped characters.
            transcription = m2.group(1).replace("\\\\", "\\").replace("\\\"", "\"")

    return points,confidence,transcription
|
| 220 |
+
|
| 221 |
+
def get_tl_dict_values(detection,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0,validNumPoints=[],validate_cw=True):
    """
    Validate the format of the dictionary. If the dictionary is not valid an exception will be raised.
    If maxWidth and maxHeight are specified, all points must be inside the imgage bounds.
    Posible values:
    {"points":[[x1,y1],[x2,y2],[x3,x3],..,[xn,yn]]}
    {"points":[[x1,y1],[x2,y2],[x3,x3],..,[xn,yn]],"transcription":"###","confidence":0.4,"illegibility":false}
    {"points":[[x1,y1],[x2,y2],[x3,x3],..,[xn,yn]],"transcription":"###","confidence":0.4,"dontCare":false}
    Returns values from the dictionary. Points , [Confidences], [Transcriptions]
    """
    confidence = 0.0
    transcription = ""
    points = []

    if not isinstance(detection, dict):
        raise Exception("Incorrect format. Object has to be a dictionary")

    if not 'points' in detection:
        raise Exception("Incorrect format. Object has no points key)")

    if not isinstance(detection['points'], list):
        raise Exception("Incorrect format. Object points key have to be an array)")

    num_points = len(detection['points'])

    if num_points < 3:
        raise Exception("Incorrect format. Incorrect number of points. At least 3 points are necessary. Found: " + str(num_points))

    # BUGFIX: the original used `num_points in validNumPoints == False`, a chained
    # comparison that is always False, so this check could never trigger.
    if len(validNumPoints) > 0 and num_points not in validNumPoints:
        raise Exception("Incorrect format. Incorrect number of points. Only allowed 4,8 or 12 points)")

    for i in range(num_points):
        if not isinstance(detection['points'][i], list):
            raise Exception("Incorrect format. Point #" + str(i+1) + " has to be an array)")

        if len(detection['points'][i]) != 2:
            raise Exception("Incorrect format. Point #" + str(i+1) + " has to be an array with 2 objects(x,y) )")

        if not isinstance(detection['points'][i][0], (int,float)) or not isinstance(detection['points'][i][1], (int,float)):
            raise Exception("Incorrect format. Point #" + str(i+1) + " childs have to be Integers)")

        if imWidth > 0 and imHeight > 0:
            validate_point_inside_bounds(detection['points'][i][0],detection['points'][i][1],imWidth,imHeight)

        # Flatten into [x1, y1, x2, y2, ...] as used by the rest of the pipeline.
        points.append(float(detection['points'][i][0]))
        points.append(float(detection['points'][i][1]))

    if validate_cw:
        validate_clockwise_points(points)

    if withConfidence:
        if not 'confidence' in detection:
            raise Exception("Incorrect format. No confidence key)")

        if not isinstance(detection['confidence'], (int,float)):
            raise Exception("Incorrect format. Confidence key has to be a float)")

        if detection['confidence'] < 0 or detection['confidence'] > 1:
            raise Exception("Incorrect format. Confidence key has to be a float between 0.0 and 1.0")

        confidence = detection['confidence']

    if withTranscription:
        if not 'transcription' in detection:
            raise Exception("Incorrect format. No transcription key)")

        if not isinstance(detection['transcription'], str):
            raise Exception("Incorrect format. Transcription has to be a string. Detected: " + type(detection['transcription']).__name__ )

        transcription = detection['transcription']

        if 'illegibility' in detection: # Ensures that if illegibility atribute is present and is True the transcription is set to ### (don't care)
            if detection['illegibility'] == True:
                transcription = "###"

        if 'dontCare' in detection: # Ensures that if dontCare atribute is present and is True the transcription is set to ### (don't care)
            if detection['dontCare'] == True:
                transcription = "###"

    return points,confidence,transcription
|
| 301 |
+
|
| 302 |
+
def validate_point_inside_bounds(x,y,imWidth,imHeight):
    """Raise an Exception when (x, y) lies outside [0, imWidth] x [0, imHeight]."""
    # BUGFIX: the original messages referenced undefined names xmin/ymin
    # (NameError on any out-of-bounds point) and the Y message had more %s
    # placeholders than arguments (TypeError). Both fixed to use x/y.
    if x < 0 or x > imWidth:
        raise Exception("X value (%s) not valid. Image dimensions: (%s,%s)" %(x,imWidth,imHeight))
    if y < 0 or y > imHeight:
        raise Exception("Y value (%s) not valid. Image dimensions: (%s,%s)" %(y,imWidth,imHeight))
|
| 307 |
+
|
| 308 |
+
def validate_clockwise_points(points):
    """
    Validates that the points are in clockwise order.
    """
    # Shoelace-style edge sum over the flattened [x1,y1,x2,y2,...] polygon.
    # A positive total means counter-clockwise in image coordinates
    # (origin top-left, y growing downwards), which is rejected.
    n = len(points)
    total = 0
    for i in range(n // 2):
        x0 = int(points[i * 2])
        y0 = int(points[i * 2 + 1])
        x1 = int(points[(i + 1) * 2 % n])
        y1 = int(points[((i + 1) * 2 + 1) % n])
        total += (x1 - x0) * (y1 + y0)
    if total > 0:
        raise Exception("Points are not clockwise. The coordinates of bounding points have to be given in clockwise order. Regarding the correct interpretation of 'clockwise' remember that the image coordinate system used is the standard one, with the image origin at the upper left, the X axis extending to the right and Y axis extending downwards.")
|
| 317 |
+
|
| 318 |
+
def get_tl_line_values_from_file_contents(content,CRLF=True,LTRB=True,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0,sort_by_confidences=True):
    """
    Returns all points, confindences and transcriptions of a file in lists. Valid line formats:
    xmin,ymin,xmax,ymax,[confidence],[transcription]
    x1,y1,x2,y2,x3,y3,x4,y4,[confidence],[transcription]
    """
    pointsList, confidencesList, transcriptionsList = [], [], []
    separator = "\r\n" if CRLF else "\n"

    for raw_line in content.split(separator):
        clean = raw_line.replace("\r", "").replace("\n", "")
        if clean == "":
            continue
        points, confidence, transcription = get_tl_line_values(clean, LTRB, withTranscription, withConfidence, imWidth, imHeight)
        pointsList.append(points)
        confidencesList.append(confidence)
        transcriptionsList.append(transcription)

    # Optionally reorder all three parallel lists by descending confidence.
    if withConfidence and len(confidencesList) > 0 and sort_by_confidences:
        import numpy as np
        order = np.argsort(-np.array(confidencesList))
        confidencesList = [confidencesList[i] for i in order]
        pointsList = [pointsList[i] for i in order]
        transcriptionsList = [transcriptionsList[i] for i in order]

    return pointsList,confidencesList,transcriptionsList
|
| 345 |
+
|
| 346 |
+
def get_tl_dict_values_from_array(array,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0,sort_by_confidences=True,validNumPoints=[],validate_cw=True):
    """
    Returns all points, confindences and transcriptions of a file in lists. Valid dict formats:
    {"points":[[x1,y1],[x2,y2],[x3,x3],..,[xn,yn]],"transcription":"###","confidence":0.4}
    """
    pointsList, confidencesList, transcriptionsList = [], [], []

    for entry in array:
        points, confidence, transcription = get_tl_dict_values(entry, withTranscription, withConfidence, imWidth, imHeight, validNumPoints, validate_cw)
        pointsList.append(points)
        confidencesList.append(confidence)
        transcriptionsList.append(transcription)

    # Optionally reorder all three parallel lists by descending confidence.
    if withConfidence and len(confidencesList) > 0 and sort_by_confidences:
        import numpy as np
        order = np.argsort(-np.array(confidencesList))
        confidencesList = [confidencesList[i] for i in order]
        pointsList = [pointsList[i] for i in order]
        transcriptionsList = [transcriptionsList[i] for i in order]

    return pointsList,confidencesList,transcriptionsList
|
| 370 |
+
|
| 371 |
+
def main_evaluation(p,default_evaluation_params_fn,validate_data_fn,evaluate_method_fn,show_result=True,per_sample=True):
    """
    This process validates a method, evaluates it and if it succed generates a ZIP file with a JSON entry for each sample.
    Params:
    p: Dictionary of parmeters with the GT/submission locations. If None is passed, the parameters send by the system are used.
    default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation
    validate_data_fn: points to a method that validates the corrct format of the submission
    evaluate_method_fn: points to a function that evaluated the submission and return a Dictionary with the results
    """
    # No explicit params: parse "-k=v"-style CLI arguments into a dict.
    if (p == None):
        p = dict([s[1:].split('=') for s in sys.argv[1:]])
        if(len(sys.argv)<3):
            print_help()

    evalParams = default_evaluation_params_fn()
    # Optional 'p' entry overrides defaults (dict directly or a JSON string).
    if 'p' in p.keys():
        evalParams.update( p['p'] if isinstance(p['p'], dict) else json.loads(p['p']) )

    resDict={'calculated':True,'Message':'','method':'{}','per_sample':'{}'}
    try:
        validate_data_fn(p['g'], p['s'], evalParams)
        evalData = evaluate_method_fn(p['g'], p['s'], evalParams)
        resDict.update(evalData)

    except Exception as e:
        # Any validation/evaluation failure is reported in the result instead of propagating.
        resDict['Message']= str(e)
        resDict['calculated']=False

    # 'o' = output folder: persist the summary (and later per-sample data) as results.zip.
    if 'o' in p:
        if not os.path.exists(p['o']):
            os.makedirs(p['o'])

        resultsOutputname = p['o'] + '/results.zip'
        outZip = zipfile.ZipFile(resultsOutputname, mode='w', allowZip64=True)

        # per_sample / output_items are written as separate ZIP entries, not in method.json.
        del resDict['per_sample']
        if 'output_items' in resDict.keys():
            del resDict['output_items']

        outZip.writestr('method.json',json.dumps(resDict))

    if not resDict['calculated']:
        if show_result:
            sys.stderr.write('Error!\n'+ resDict['Message']+'\n\n')
        if 'o' in p:
            outZip.close()
        return resDict

    if 'o' in p:
        if per_sample == True:
            # One JSON entry per sample, plus any raw output items produced by the evaluator.
            for k,v in evalData['per_sample'].items():
                outZip.writestr( k + '.json',json.dumps(v))

            if 'output_items' in evalData.keys():
                for k, v in evalData['output_items'].items():
                    outZip.writestr( k,v)

        outZip.close()

    # if show_result:
    #     #sys.stdout.write("Calculated!")
    #     sys.stdout.write(json.dumps(resDict['method']))

    return resDict
|
| 436 |
+
|
| 437 |
+
|
| 438 |
+
def main_validation(default_evaluation_params_fn,validate_data_fn):
    """
    This process validates a method
    Params:
    default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation
    validate_data_fn: points to a method that validates the corrct format of the submission
    """
    try:
        # Parse "-k=v"-style CLI arguments into a dict ('g' = GT path, 's' = submission path).
        p = dict([s[1:].split('=') for s in sys.argv[1:]])
        evalParams = default_evaluation_params_fn()
        # Optional 'p' entry overrides defaults (dict directly or a JSON string).
        if 'p' in p.keys():
            evalParams.update( p['p'] if isinstance(p['p'], dict) else json.loads(p['p']) )

        validate_data_fn(p['g'], p['s'], evalParams)
        print ('SUCCESS')
        sys.exit(0)
    except Exception as e:
        # Exit code 101 signals a validation failure to the calling system.
        print (str(e))
        sys.exit(101)
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/__init__.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .judge_util import build_judge, DEBUG_MESSAGE
|
| 2 |
+
from .multiple_choice import extract_answer_from_item, prefetch_answer
|
| 3 |
+
from .vqa_eval import levenshtein_distance
|
| 4 |
+
from .spatial457 import Spatial457_utils
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
__all__ = [
|
| 8 |
+
'build_judge', 'extract_answer_from_item', 'prefetch_answer',
|
| 9 |
+
'levenshtein_distance', 'DEBUG_MESSAGE',
|
| 10 |
+
'Spatial457_utils'
|
| 11 |
+
]
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/ccocr_evaluator/ocr_evaluator.py
ADDED
|
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import sys
|
| 3 |
+
import json
|
| 4 |
+
import re
|
| 5 |
+
from collections import Counter
|
| 6 |
+
|
| 7 |
+
# local import
|
| 8 |
+
from .common import BaseMetric
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def token_normalize(token_text, is_lower=False, is_alphanum_only=False):
    """
    Normalize one token: optionally lowercase it, then optionally strip
    every character that is not an ASCII letter or digit.
    """
    normalized = token_text.lower() if is_lower else token_text
    if is_alphanum_only:
        normalized = re.sub('[^A-Za-z0-9]+', '', normalized)
    return normalized
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def text_normalize_and_tokenize(text, is_keep_blank=True, is_lower=True, is_alphanum_only=False):
    """
    Clean raw OCR text and split it into normalized tokens.

    Tabs/newlines become spaces, the '###'/'***' don't-care markers are
    dropped and whitespace runs are collapsed. Tokenization is by word
    (is_keep_blank=True) or by character; empty tokens are filtered out.
    """
    cleaned = text.replace("\t", " ").replace("\n", " ").replace("###", "").replace("***", "")
    cleaned = re.sub(r'\s+', ' ', cleaned)
    if not is_keep_blank:
        cleaned = cleaned.replace(" ", "")
    raw_tokens = cleaned.split(" ") if is_keep_blank else list(cleaned)
    normalized = (token_normalize(t, is_lower, is_alphanum_only) for t in raw_tokens)
    return [t for t in normalized if len(t) > 0]
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def evaluate_single_sample(gts, preds):
    """
    Count matched tokens between ground truth and prediction: for every
    distinct GT token, min(count in gts, count in preds), summed.
    """
    gt_counts = Counter(gts)
    pred_counts = Counter(preds)
    return sum(min(count, pred_counts.get(token, 0)) for token, count in gt_counts.items())
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def calculate_metrics(response_info, gt_info, is_verbose=False):
    """
    Compute token-level precision/recall/F1 over per-file token lists.

    Macro scores average the per-file metrics; micro scores pool the raw
    counts across all files. A 1e-9 epsilon guards every division.
    """
    eps = 1e-9
    per_file_recall, per_file_precision, per_file_f1 = [], [], []
    gt_total, pred_total, right_total = 0, 0, 0

    for file_name, gt_tokens in gt_info.items():
        pred_tokens = response_info.get(file_name, [])
        matched = evaluate_single_sample(gt_tokens, pred_tokens)
        right_total += matched
        gt_total += len(gt_tokens)
        pred_total += len(pred_tokens)

        recall = matched / (len(gt_tokens) + eps)
        precision = matched / (len(pred_tokens) + eps)
        per_file_recall.append(recall)
        per_file_precision.append(precision)
        per_file_f1.append(2 * recall * precision / (recall + precision + eps))

    # Macro: mean of per-file metrics.
    macro_recall = sum(per_file_recall) / (len(per_file_recall) + eps)
    macro_precision = sum(per_file_precision) / (len(per_file_precision) + eps)
    macro_f1 = sum(per_file_f1) / (len(per_file_f1) + eps)

    # Micro: pooled counts across the whole set.
    micro_recall = right_total / (gt_total + eps)
    micro_precision = right_total / (pred_total + eps)
    micro_f1 = 2 * micro_recall * micro_precision / (micro_recall + micro_precision + eps)

    # 'mirco_f1_score' typo is preserved: downstream consumers read this key.
    if is_verbose:
        return {
            'macro_recall': macro_recall, 'macro_precision': macro_precision, 'macro_f1_score': macro_f1,
            'micro_recall': micro_recall, 'micro_precision': micro_precision, 'mirco_f1_score': micro_f1
        }
    return {'macro_f1_score': macro_f1, 'mirco_f1_score': micro_f1}
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
class OcrEvaluator(BaseMetric):
    # Full-text OCR metric: tokenizes prediction and ground truth the same way
    # and scores their multiset overlap as macro/micro F1 via calculate_metrics.

    def response_post_func(self, response_text, **kwargs):
        """Identity post-processing: the raw model response is used as-is."""
        return response_text

    def evaluate(self, response_info, gt_info, **kwargs):
        """
        Score {file_name: prediction} against {file_name: ground truth}.

        Returns {"summary": metric dict, "metric_config": tokenization flags}.
        """
        # hard code here
        dataset_name = kwargs['dataset']
        is_word_level, is_lower, is_alphanum_only = True, True, False
        # Scripts without whitespace word boundaries are scored at character level.
        if dataset_name in ["Arabic", "Japanese", "Korean"] or "zh" in dataset_name:
            is_word_level = False
        # NOTE(review): self.group_name is assumed to be set by BaseMetric — confirm.
        if "multi_scene_ocr" in self.group_name and is_word_level:
            is_alphanum_only = True
        eval_config = {"word_level": is_word_level, "alphanum_only": is_alphanum_only, "lowercase": is_lower}

        image_pdt_info, image_gt_info = {}, {}
        for file_name, gt_src in gt_info.items():
            # Missing predictions default to "" so every GT file is still scored.
            pred_src = response_info.get(file_name, "")
            pdt_token_list = text_normalize_and_tokenize(
                str(pred_src).strip(), is_word_level, is_lower, is_alphanum_only)
            gt_token_list = text_normalize_and_tokenize(
                str(gt_src).strip(), is_word_level, is_lower, is_alphanum_only)
            image_pdt_info[file_name] = pdt_token_list
            image_gt_info[file_name] = gt_token_list
        eval_result = calculate_metrics(image_pdt_info, image_gt_info, is_verbose=False)
        return {"summary": eval_result, "metric_config": eval_config}
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
if __name__ == '__main__':
|
| 106 |
+
pass
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/mathvista.py
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ...smp import *
|
| 2 |
+
from ...utils import can_infer
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
FAIL_MSG = 'Failed to obtain answer via API.'
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def get_gpt4_ICE():
    """
    Return the five in-context examples used to prompt GPT-4 to extract a
    final answer (integer, float, list, or option letter) from a free-form
    model response. These strings are part of the judge prompt: do not edit
    their wording without re-validating the evaluation.
    """
    example_1 = """
Hint: Please answer the question requiring an integer answer and provide the final value,
e.g., 1, 2, 3, at the end.\n
Question: Which number is missing?\n
Model response: The number missing in the sequence is 14.\n
Extracted answer: 14
"""

    example_2 = """
Hint: Please answer the question requiring a floating-point number with one decimal place and provide the final value,
e.g., 1.2, 1.3, 1.4, at the end.\n
Question: What is the fraction of females facing the camera?\n
Model response: The fraction of females facing the camera is 0.6,
which means that six out of ten females in the group are facing the camera.\n
Extracted answer: 0.6
"""

    example_3 = """
Hint: Please answer the question requiring a floating-point number with two decimal places and provide the final value,
e.g., 1.23, 1.34, 1.45, at the end.\n
Question: How much money does Luca need to buy a sour apple candy and a butter-scotch candy? (Unit: $)\n
Model response: Luca needs $1.45 to buy a sour apple candy and a butterscotch candy.\n
Extracted answer: 1.45
"""

    example_4 = """
Hint: Please answer the question requiring a Python list as an answer and provide the final list,
e.g., [1, 2, 3], [1.2, 1.3, 1.4], at the end.\n
Question: Between which two years does the line graph saw its maximum peak?\n
Model response: The line graph saw its maximum peak between 2007 and 2008.\n
Extracted answer: [2007, 2008]
"""

    example_5 = """
Hint: Please answer the question and provide the correct option letter, e.g., A, B, C, D, at the end.\n
Question: What fraction of the shape is blue?\n
Choices: (A) 3/11 (B) 8/11 (C) 6/11 (D) 3/5\n
Model response: The correct answer is (B) 8/11.\n
Extracted answer: B
"""

    return [example_1, example_2, example_3, example_4, example_5]
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def build_mathvista_gpt4_prompt(line):
    """
    Assemble the GPT-4 answer-extraction prompt for one MathVista record:
    task description + in-context examples + question + model prediction.
    """
    task_description = """
Please read the following example.
Then extract the answer from the model response and type it at the end of the prompt.\n
"""
    question = line['question']
    prediction = str(line['prediction'])
    prompt = task_description
    examples = get_gpt4_ICE()
    for example in examples:
        prompt += example + '\n'
    prompt += question + '\n'
    # NOTE(review): 'respone' typo and the missing newline before 'Extracted
    # answer:' are part of the established judge prompt — changing them would
    # alter evaluation behavior, so they are preserved as-is.
    prompt += 'Model respone: ' + prediction
    prompt += 'Extracted answer:'
    return prompt
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def list_to_dict(lst):
    """Map list items to consecutive capital letters: [v0, v1, ...] -> {'A': v0, 'B': v1, ...}."""
    letters = (chr(65 + idx) for idx in range(len(lst)))
    return dict(zip(letters, lst))
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def post_check(line, prefetch=False):
    """
    Check whether a model answer matches the ground truth for one record.

    line: mapping with keys 'question_type', 'answer_type', 'answer',
          'prediction' and (when prefetch is False) 'res'.
    prefetch=True : try to infer the answer directly from the raw prediction;
                    returns the inferred value on success, False otherwise.
    prefetch=False: compare the extracted answer ('res') to the ground truth
                    and return True/False.
    """
    res = None
    ans = line['answer']
    response = line['prediction'] if prefetch else line['res']
    try:
        if line['question_type'] == 'multi_choice':
            ans = line['answer_option']
            # NOTE: eval() on the dataset's 'choices' field — assumed trusted input.
            choices = list_to_dict(eval(line['choices']))
            res = can_infer(response, choices)
            if prefetch:
                return res
        else:
            if line['answer_type'] == 'integer':
                res = int(response)
                ans = int(line['answer'])
            elif line['answer_type'] == 'float':
                res = float(response)
                ans = float(line['answer'])
            else:
                # BUGFIX: was `res = str(res)`, which stringified the still-None
                # res (always 'None'), so free-form text answers never matched.
                res = str(response)
                ans = str(ans)
    except ValueError:
        # Unparseable numeric response: fall through with res unset/partial.
        pass

    if res == ans:
        return res if prefetch else True
    else:
        return False
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def MathVista_auxeval(model, line):
    """
    Extract the final answer for one MathVista record, using the judge
    `model` only when rule-based prefetching fails.

    Returns dict(log=..., res=...) where res is the extracted answer
    (or '' after all retries fail).
    """
    prompt = build_mathvista_gpt4_prompt(line)
    log = ''
    retry = 5
    # Fast path: if the answer can be inferred directly from the raw
    # prediction, skip the judge model entirely.
    if post_check(line, prefetch=True):
        res = post_check(line, prefetch=True)
        return dict(log='Prefetch succeed', res=res)
    for i in range(retry):
        prediction = line['prediction']
        # Temperature grows with each retry to escape repeated judge failures.
        res = model.generate(prompt, temperature=i * 0.5)

        if FAIL_MSG in res:
            log += f'Try {i}: output is {prediction}, failed to parse.\n'
        else:
            log += 'Succeed'
            return dict(log=log, res=res)
    log += 'All 5 retries failed.\n'
    return dict(log=log, res='')
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
def MathVista_acc(result_file):
    """
    Aggregate MathVista results into a DataFrame of accuracy per task and
    per skill (plus an 'Overall' row): totals, prefetch counts/rate, hits
    and accuracy.
    """
    data = load(result_file)
    # tot/fetch/hit are keyed by 'Overall', task names and skill names alike.
    tot = defaultdict(lambda: 0)
    fetch = defaultdict(lambda: 0)
    hit = defaultdict(lambda: 0)
    lt = len(data)
    skill_list = []
    for i in range(lt):
        item = data.iloc[i]
        cate = item['task']
        tot['Overall'] += 1
        try:
            # 'skills' is stored as a stringified Python list; eval on dataset-controlled text.
            skills = eval(item['skills'])
        except SyntaxError:
            # Plain (unquoted) skill string: treat it as a single-element list.
            skills = [item['skills']]
        for skill in skills:
            if skill not in skill_list:
                skill_list.append(skill)
            tot[skill] += 1
        tot[cate] += 1
        # 'Prefetch succeed' marks answers extracted without the judge model.
        if item['log'] == 'Prefetch succeed':
            fetch['Overall'] += 1
            fetch[cate] += 1
            for skill in skills:
                fetch[skill] += 1
        if post_check(item, prefetch=False):
            hit['Overall'] += 1
            hit[cate] += 1
            for skill in skills:
                hit[skill] += 1

    res = defaultdict(list)
    for k in tot.keys():
        res['Task&Skill'].append(k)
        res['tot'].append(tot[k])
        res['prefetch'].append(fetch[k])
        res['hit'].append(hit[k])
        res['prefetch_rate'].append(fetch[k] / tot[k] * 100)
        res['acc'].append(hit[k] / tot[k] * 100)
    res = pd.DataFrame(res)
    return res
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/dict_nbbox_iou_tuple_agg_jaccard.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .nbbox_iou import NbboxIouTuple
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class DictNbboxIouTupleAggJaccard:
    """Calculates the average precision IoU across the dict.

    1. Calculates the precision IoU for all sets with the same key,
       if it appears in either pred or targets
    2. Calculates the total, then divides by the size of the union
    """

    @classmethod
    def match(cls, responses, targets) -> float:
        """Return the aggregated Jaccard index between targets and responses."""
        if not isinstance(responses, dict):
            return 0
        all_keys = set(responses) | set(targets)

        # BUGFIX: when both dicts are empty the original divided by zero
        # (num_keys == 0); score an empty/empty pair as 0.
        if not all_keys:
            return 0

        total_score = 0
        for key in all_keys:
            # A key missing on either side contributes its score against [].
            total_score += NbboxIouTuple.match(
                responses.get(key, []), targets.get(key, [])
            )

        return total_score / len(all_keys)
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/exact_str_match.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
from ..parsing.common.utils import extract_code_block_content
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def parse_single_letter(s):
    """Extract the choice letter from strings like ``(A)XXX``, ``A . XXX`` or ``A.XXX``.

    Returns *s* unchanged when no leading letter can be found.
    """
    # Optional "(", one letter, optional ")", optional "." (possibly spaced), rest.
    found = re.match(r"^\(?([A-Za-z])\)?(?:\s*\.\s*|\.)?(.*)", s)
    return found.group(1) if found else s
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class ExactStrMatch:
    """Exact string matching."""

    @staticmethod
    def match(response: str, correct_answer: str) -> int:
        """Return 1 when *response* exactly equals *correct_answer*, else 0.

        Non-string inputs are stringified first.  When the ground truth is a
        bare choice letter, the response is normalized with
        ``parse_single_letter`` to drop parenthesis/period decoration.
        """
        response = response if isinstance(response, str) else str(response)
        if not isinstance(correct_answer, str):
            correct_answer = str(correct_answer)

        # Special case: ground truth is a single choice letter, but the model
        # answered with a decorated form such as "(A)" or "A. foo".
        is_choice_letter = len(correct_answer) == 1 and correct_answer.isalpha()
        if is_choice_letter and len(response) > 1:
            response = parse_single_letter(response)

        return int(response == correct_answer)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class CodeResultExactStrMatch:
    """Exact string matching, with the results from a results code block."""

    @staticmethod
    def match(response: str, correct_answer: str) -> int:
        """Unwrap the code fence around *correct_answer*, then exact-match.

        The returned fence flag is intentionally ignored (the original kept
        a commented-out assertion on it).
        """
        unwrapped, _is_code = extract_code_block_content(
            correct_answer,
            is_ascii_art=True,
            should_remove_surrounding_whitespace=False,
        )
        return ExactStrMatch.match(response, unwrapped)
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/normalized_similarity_damerau_levenshtein.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import rapidfuzz
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class NormalizedSimilarityDamerauLevenshtein:
    """Normalized Damerau-Levenshtein Similarity."""

    @staticmethod
    def match(response, correct_answer) -> float:
        """Return the normalized Damerau-Levenshtein similarity (0.0 to 1.0)
        between *response* and *correct_answer*.

        Scores 0 when the model response is not a string while the ground
        truth is.
        """
        # NOTE(review): this guards only the (non-str response, str target)
        # combination; other non-string pairings fall straight through to
        # rapidfuzz -- confirm that is intended.
        if not isinstance(response, str) and isinstance(correct_answer, str):
            return 0
        return rapidfuzz.distance.DamerauLevenshtein.normalized_similarity(
            response, correct_answer
        )
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/mmif/__init__.py
ADDED
|
File without changes
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/mmif/__pycache__/function_and_compare.cpython-310.pyc
ADDED
|
Binary file (13 kB). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/mmif/function_and_compare.py
ADDED
|
@@ -0,0 +1,429 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa
|
| 2 |
+
import re
|
| 3 |
+
from typing import List
|
| 4 |
+
import nltk
|
| 5 |
+
# from dotenv import load_dotenv
|
| 6 |
+
|
| 7 |
+
# load_dotenv()
|
| 8 |
+
|
| 9 |
+
# # nltk.download("punkt")
|
| 10 |
+
# nltk.data.path.append(
|
| 11 |
+
# os.environ["NLTK_DATA_PATH"]
|
| 12 |
+
# )
|
| 13 |
+
|
| 14 |
+
# HumanCheck: True
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def check_whether_response_paragraph_number_in_range(
    response: str, lower_bound: int, upper_bound: int
) -> bool:
    """Check that the paragraph count of *response* lies within
    [lower_bound, upper_bound] (inclusive)."""
    # Normalize: strip each line, then trim the whole text.
    normalized = "\n".join(
        line.strip() for line in response.splitlines()
    ).strip()

    # Paragraphs are separated by one or more blank lines; ignore empty chunks.
    chunks = re.split(r"\n\s*\n", normalized)
    paragraph_total = sum(1 for chunk in chunks if chunk.strip())

    return lower_bound <= paragraph_total <= upper_bound
|
| 35 |
+
|
| 36 |
+
# HumanCheck: True
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def check_whether_response_sentence_number_in_range(
    response: str, lower_bound: int, upper_bound: int
) -> bool:
    """Check that the sentence count of *response* lies within
    [lower_bound, upper_bound] (inclusive)."""
    # Strip per-line whitespace before tokenizing.
    normalized = "\n".join(
        line.strip() for line in response.splitlines()
    ).strip()

    # nltk's tokenizer handles abbreviations better than naive splitting.
    sentence_total = len(nltk.sent_tokenize(normalized))
    return lower_bound <= sentence_total <= upper_bound
|
| 54 |
+
|
| 55 |
+
# HumanCheck: True
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def check_whether_each_paragraph_sentence_number_in_range(
    response: str, lower_bound: int, upper_bound: int
) -> bool:
    """Check that every paragraph of *response* has a sentence count within
    [lower_bound, upper_bound] (inclusive)."""
    normalized = "\n".join(
        line.strip() for line in response.splitlines()
    ).strip()

    # Blank-line separated paragraphs; drop empty chunks.
    paragraphs = [
        chunk for chunk in re.split(r"\n\s*\n", normalized) if chunk.strip()
    ]

    return all(
        lower_bound <= len(nltk.sent_tokenize(paragraph)) <= upper_bound
        for paragraph in paragraphs
    )
|
| 81 |
+
|
| 82 |
+
# HumanCheck: True
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def check_whether_each_paragraph_sentence_number_in_range_list(
    response: str, ranges: List[List[int]]
) -> bool:
    """Check paragraph-wise sentence counts against per-paragraph ranges.

    ``ranges[i]`` is the inclusive [lower, upper] bound for paragraph *i*;
    the paragraph count must equal ``len(ranges)``.
    """
    normalized = "\n".join(
        line.strip() for line in response.splitlines()
    ).strip()
    paragraphs = [
        chunk for chunk in re.split(r"\n\s*\n", normalized) if chunk.strip()
    ]

    # A paragraph-count mismatch fails outright.
    if len(paragraphs) != len(ranges):
        return False

    for paragraph, (low, high) in zip(paragraphs, ranges):
        if not (low <= len(nltk.sent_tokenize(paragraph)) <= high):
            return False
    return True
|
| 111 |
+
|
| 112 |
+
# HumanCheck: True
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def check_whether_response_word_count_in_range(
    response: str, lower_bound: int, upper_bound: int
) -> bool:
    """Check that the word count of *response* lies within
    [lower_bound, upper_bound] (inclusive)."""
    # Drop punctuation other than word chars, whitespace, "." and "-",
    # then count whitespace-separated tokens.
    sanitized = re.sub(r"[^\w\s.-]", "", response)
    total_words = len(sanitized.split())
    return lower_bound <= total_words <= upper_bound
|
| 124 |
+
|
| 125 |
+
# HumanCheck: True
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def check_whether_each_paragraph_word_count_in_range(
    response: str, lower_bound: int, upper_bound: int
) -> bool:
    """Check that every paragraph's word count is within
    [lower_bound, upper_bound] (inclusive).

    Example: a 50-80 words-per-paragraph constraint maps to
    lower_bound=50, upper_bound=80.
    """
    normalized = "\n".join(
        line.strip() for line in response.splitlines()
    ).strip()
    paragraphs = [
        chunk for chunk in re.split(r"\n\s*\n", normalized) if chunk.strip()
    ]

    for paragraph in paragraphs:
        # Same sanitization as the whole-response word counter.
        sanitized = re.sub(r"[^\w\s.-]", "", paragraph)
        if not (lower_bound <= len(sanitized.split()) <= upper_bound):
            return False
    return True
|
| 154 |
+
|
| 155 |
+
# HumanCheck: True
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def check_whether_whole_response_not_contain_certain_substrings(
    response: str, substrings: List[str]
) -> bool:
    """Return True iff none of *substrings* occur anywhere in *response*.

    Example: forbidding "apple" and "banana" maps to
    substrings=["apple", "banana"].
    """
    for forbidden in substrings:
        if forbidden in response:
            return False
    return True
|
| 166 |
+
|
| 167 |
+
# HumanCheck: True
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
def check_whether_whole_response_not_contain_certain_substring(
    response: str, substring: str
) -> bool:
    """Return True iff *substring* never occurs in *response*."""
    return response.find(substring) == -1
|
| 174 |
+
|
| 175 |
+
# HumanCheck: True
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def check_whether_each_sentence_begin_with_certain_substring(
    response: str, substring: str
) -> bool:
    """Return True iff every sentence of *response* starts with *substring*.

    Example: requiring each sentence to open with an exclamation point maps
    to substring="!".
    """
    normalized = "\n".join(
        line.strip() for line in response.splitlines()
    ).strip()

    for sentence in nltk.sent_tokenize(normalized):
        if not sentence.startswith(substring):
            return False
    return True
|
| 194 |
+
|
| 195 |
+
# HumanCheck: True
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
def check_whether_each_paragraph_begin_with_certain_substring(
    response: str, substring: str
) -> bool:
    """Return True iff every paragraph of *response* starts with *substring*."""
    normalized = "\n".join(
        line.strip() for line in response.splitlines()
    ).strip()

    # Blank-line separated paragraphs, empty chunks ignored.
    paragraphs = (
        chunk for chunk in re.split(r"\n\s*\n", normalized) if chunk.strip()
    )
    return all(chunk.startswith(substring) for chunk in paragraphs)
|
| 213 |
+
|
| 214 |
+
# HumanCheck: True
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
def check_whether_each_paragraph_end_with_certain_substring(
    response: str, substring: str
) -> bool:
    """Return True iff every paragraph of *response* ends with *substring*."""
    normalized = "\n".join(
        line.strip() for line in response.splitlines()
    ).strip()

    paragraphs = (
        chunk for chunk in re.split(r"\n\s*\n", normalized) if chunk.strip()
    )
    return all(chunk.endswith(substring) for chunk in paragraphs)
|
| 232 |
+
|
| 233 |
+
# HumanCheck: True
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
def check_whether_each_sentence_end_with_certain_substring(
    response: str, substring: str
) -> bool:
    """Return True iff every sentence of *response* ends with *substring*."""
    normalized = "\n".join(
        line.strip() for line in response.splitlines()
    ).strip()

    for sentence in nltk.sent_tokenize(normalized):
        if not sentence.endswith(substring):
            return False
    return True
|
| 248 |
+
|
| 249 |
+
# HumanCheck: True
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
def check_whether_whole_response_begin_with_certain_substring(
    response: str, substring: str
) -> bool:
    """Return True iff the whitespace-trimmed response starts with *substring*."""
    trimmed = response.strip()
    return trimmed.startswith(substring)
|
| 256 |
+
|
| 257 |
+
# HumanCheck: True
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
def check_whether_whole_response_end_with_certain_substring(
    response: str, substring: str
) -> bool:
    """Return True iff the whitespace-trimmed response ends with *substring*."""
    trimmed = response.strip()
    return trimmed.endswith(substring)
|
| 264 |
+
|
| 265 |
+
# HumanCheck: True
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
def check_whether_each_keyword_in_list_metioned_in_range(
    response: str,
    keywords: List[str],
    lower_bound_times: int,
    upper_bound_times: int) -> bool:
    """Return True iff every keyword occurs between lower_bound_times and
    upper_bound_times (inclusive), counted case-insensitively as whole words.

    Whole-word matching keeps e.g. "Reddit" from matching inside "Redditor".
    """
    normalized = "\n".join(
        line.strip() for line in response.splitlines()
    ).strip().lower()

    for keyword in keywords:
        # \b anchors restrict the match to whole words.
        occurrences = re.findall(
            r'\b' + re.escape(keyword.lower()) + r'\b', normalized
        )
        if not (lower_bound_times <= len(occurrences) <= upper_bound_times):
            return False
    return True
|
| 290 |
+
|
| 291 |
+
# HumanCheck: True
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
def check_whether_total_keyword_in_list_metioned_in_range(
    response: str,
    keywords: List[str],
    lower_bound_times: int,
    upper_bound_times: int) -> bool:
    """Return True iff the combined whole-word occurrence count of all
    *keywords* lies within [lower_bound_times, upper_bound_times].

    Matching is case-insensitive and whole-word ("Reddit" does not match
    inside "Redditor").
    """
    normalized = "\n".join(
        line.strip() for line in response.splitlines()
    ).strip().lower()

    total = sum(
        len(re.findall(r'\b' + re.escape(word.lower()) + r'\b', normalized))
        for word in keywords
    )
    return lower_bound_times <= total <= upper_bound_times
|
| 314 |
+
|
| 315 |
+
# HumanCheck: True
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
def check_percentage_number_precision_in_response(
    response: str, precision: int) -> bool:
    """Return True iff every number directly preceding a '%' sign is written
    with exactly *precision* decimal places.

    Integer percentages (no decimal point) always fail the check.
    """
    # Numbers may be separated from '%' by whitespace.
    for number_text in re.findall(r'(\d+\.\d+|\d+)\s*%', response):
        _head, dot, decimals = number_text.partition('.')
        if not dot:
            # No decimal point at all: not a float.
            return False
        if len(decimals) != precision:
            return False
    return True
|
| 335 |
+
|
| 336 |
+
# HumanCheck: True
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
def check_number_precision_in_response(response: str, precision: int) -> bool:
    """Return True iff every number in *response* carries exactly *precision*
    digits after the decimal point in its mantissa.

    Handles thousands separators, scientific notation, and trailing '%'.
    Plain integers pass only when precision == 0.
    """
    # Regex pattern to extract numbers, including scientific notation and
    # percentages.  Whitespace/comments are insignificant under re.VERBOSE.
    number_pattern = r'''
        (?<!\w)                          # Not preceded by a word character
        [+-]?                            # Optional sign
        (?:                              # Number formats:
            \d{1,3}(?:,\d{3})*(?:\.\d+)? # e.g., 1,234.56
            | \d+\.\d+                   # e.g., 123.456
            | \.\d+                      # e.g., .456
            | \d+                        # e.g., 123
        )
        (?:[eE][+-]?\d+)?                # Optional scientific notation
        %?                               # Optional percentage
        (?!\w)                           # Not followed by a word character
    '''

    for found in re.finditer(number_pattern, response, flags=re.VERBOSE):
        # Strip thousands separators and a trailing percent sign.
        token = found.group().replace(',', '').rstrip('%')

        # Only the mantissa (before any exponent) carries decimal digits.
        if 'e' in token.lower():
            mantissa = re.split('[eE]', token)[0]
        else:
            mantissa = token

        if '.' in mantissa:
            if len(mantissa.split('.')[-1]) != precision:
                return False
        elif precision != 0:
            # Plain integer, but a nonzero precision was demanded.
            return False

    return True
|
| 378 |
+
|
| 379 |
+
# HumanCheck: True
|
| 380 |
+
|
| 381 |
+
|
| 382 |
+
def check_whether_has_no_arabic_number_in_response(response: str) -> bool:
    """Return True iff *response* contains no Arabic numerals (integers,
    decimals, comma-grouped numbers, percentages, or scientific notation)."""
    # Whitespace/comments inside the pattern are insignificant (re.VERBOSE).
    number_pattern = r"""
        (?<![.\w])                           # Ensure no preceding . or word char
        (?:                                  # Start of number pattern
            \d{1,3}(?:,\d{3})+(?:\.\d+)?%? | # 1,000 or 1,000.00 or 1,000%
            \d+\.\d+%? |                     # decimals: 3.14, 0.5%
            \d+%? |                          # integers: 100, 100%
            \d+(?:\.\d+)?(?:[eE][+-]?\d+)    # scientific: 5e-10, 5.09e-10
        )
        (?![.\w])                            # Ensure no trailing . or word char
    """
    hits = re.findall(
        number_pattern,
        response,
        flags=re.IGNORECASE | re.VERBOSE)
    return len(hits) == 0
|
| 399 |
+
|
| 400 |
+
# HumanCheck: True
|
| 401 |
+
# def check_scientific_notation_precision_in_response(
|
| 402 |
+
# response: str, significant_digits: int
|
| 403 |
+
# ) -> bool:
|
| 404 |
+
# scientific_pattern = r"(?<!\w)(?:\d+\.\d+|\d+)(?:[eE][+-]?\d+)(?!\w)"
|
| 405 |
+
|
| 406 |
+
# numbers = re.findall(scientific_pattern, response)
|
| 407 |
+
|
| 408 |
+
# for number in numbers:
|
| 409 |
+
# # Split into base and exponent
|
| 410 |
+
# parts = re.split(r"[eE]", number.lower())
|
| 411 |
+
# if len(parts) != 2:
|
| 412 |
+
# continue # Skip invalid scientific notation
|
| 413 |
+
|
| 414 |
+
# base, exponent = parts
|
| 415 |
+
|
| 416 |
+
# # Handle cases like "0.000" or "0"
|
| 417 |
+
# if all(c == "0" for c in base.replace(".", "")):
|
| 418 |
+
# base_digits = "0" # Treat as 0 with 1 significant digit
|
| 419 |
+
# else:
|
| 420 |
+
# # Remove leading and trailing zeros (but keep significant zeros)
|
| 421 |
+
# base_digits = base.replace(".", "").lstrip("0") or "0"
|
| 422 |
+
|
| 423 |
+
# significant_count = len(base_digits)
|
| 424 |
+
# print(f"Number: {number}, Significant digits: {significant_count}")
|
| 425 |
+
|
| 426 |
+
# if significant_count != significant_digits:
|
| 427 |
+
# return False
|
| 428 |
+
|
| 429 |
+
# return True
|
VLMEvalKit-sudoku/vlmeval/dataset/utils/tempcompass.py
ADDED
|
@@ -0,0 +1,254 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ...smp import *
|
| 2 |
+
from .multiple_choice import extract_answer_from_item
|
| 3 |
+
from PIL import Image, ImageOps
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
sys_prompt = "You are an AI assistant for question answering."
|
| 7 |
+
|
| 8 |
+
system_prompt_multi_choice = (
|
| 9 |
+
"You will receive a multi-choice question, the ground-truth answer and the prediction from a question answering (QA) model. " # noqa
|
| 10 |
+
"Your task is to determine whether QA model prediction is correct, based on the question and ground-truth answer. "
|
| 11 |
+
"If the prediction is correct, respond \"Correct\". If the prediction is incorrect, respond \"Incorrect\"."
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
system_prompt_caption_matching = (
|
| 15 |
+
"You will receive a caption matching question, the ground-truth answer and the prediction from a question answering (QA) model. " # noqa
|
| 16 |
+
"Your task is to determine whether QA model prediction is correct, based on the question and ground-truth answer. "
|
| 17 |
+
"If the prediction is correct, respond \"Correct\". If the prediction is incorrect, respond \"Incorrect\"."
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
system_prompt_captioning = """
|
| 21 |
+
You will receive a video description and a multi-choice question. Your task is to choose the correct answer and briefly explain the reason why you choose the answer. \
|
| 22 |
+
If none of the choice candidates are correct or the video description lacks enough information to answer the question, just answer "None of the choices are correct". \
|
| 23 |
+
Please organize your response in this format:
|
| 24 |
+
```
|
| 25 |
+
Reasoning: [Your reason to obtain the answer]
|
| 26 |
+
Answer: [Your answer]
|
| 27 |
+
```
|
| 28 |
+
|
| 29 |
+
Here are some examples of video description, multi-choice question and the expected answer:
|
| 30 |
+
```
|
| 31 |
+
Video Description: A person is palying football.
|
| 32 |
+
Multi-Choice Question:
|
| 33 |
+
What is the person doing in the video?
|
| 34 |
+
A. cooking
|
| 35 |
+
B. palying football
|
| 36 |
+
C. playing basketball
|
| 37 |
+
D. reading book
|
| 38 |
+
Reasoning: The video description mentions that the person is playing football.
|
| 39 |
+
Answer: B. palying football
|
| 40 |
+
|
| 41 |
+
Video Description: A bird is flying clockwise.
|
| 42 |
+
Multi-Choice Question:
|
| 43 |
+
In which direction is the bird flying?
|
| 44 |
+
A. backwark
|
| 45 |
+
B. counter-clockwise
|
| 46 |
+
C. clockwise
|
| 47 |
+
D. downward
|
| 48 |
+
Reasoning: The video description mentions that the bird is flying clockwise
|
| 49 |
+
Answer: C. clockwise
|
| 50 |
+
|
| 51 |
+
Video Description: An air balloon is inflating.
|
| 52 |
+
Multi-Choice Question:
|
| 53 |
+
What is happening to the air balloon?
|
| 54 |
+
A. exploding
|
| 55 |
+
B. getting smaller
|
| 56 |
+
C. flying
|
| 57 |
+
Reasoning: The video description mentions that the air balloon is inflating, while none of the coices can be explained as inflating.
|
| 58 |
+
Answer: None of the choices are correct
|
| 59 |
+
```
|
| 60 |
+
""" # noqa
|
| 61 |
+
|
| 62 |
+
system_prompt_YorN = """
|
| 63 |
+
You will receive a Yes/No question, the ground-truth answer and the prediction from a question answering (QA) model. \
|
| 64 |
+
Your task is to determine whether QA model prediction is correct, based on the question and ground-truth answer. \
|
| 65 |
+
If the prediction is correct, respond "Correct". If the prediction is incorrect, respond "Incorrect".
|
| 66 |
+
""" # noqa
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def eval_rule_caption_matching(line):
    """Rule-based scoring for caption-matching questions.

    Aligns the raw model output against the candidate options (as a full
    option string, a bare option sentence, or an option index) and compares
    the aligned prediction with the ground-truth answer.

    Returns:
        1 (correct), 0 (incorrect), or the string "fail" when the output
        could not be matched to any option (caller falls back to a judge).
    """
    # Determine whether the video llm output is correct, based on word matching rules
    video_llm_output = line['prediction']
    answer = line['answer']
    # NOTE(review): eval() on dataset text -- assumes the TSV is trusted.
    option_strs = eval(line['candidates'])  # complete option strings
    option_sents = [opt.split(': ')[1] for opt in option_strs]  # option sentence
    # option index, e.g., Sentence A, Caption A, Option 1
    option_inds = [opt.split(': ')[0] for opt in option_strs] + [opt.split(': ')[0].replace('Sentence ', '').replace('Option ', '').replace('Caption ', '') for opt in option_strs]  # noqa
    video_llm_pred = None
    # The three loops run in order and overwrite video_llm_pred, so an index
    # match wins over a sentence match, which wins over a full-string match.
    for option_str in option_strs:
        if option_str == video_llm_output:
            video_llm_pred = option_str
    for option_sent in option_sents:
        if option_sent == video_llm_output or (') ' in video_llm_output and option_sent == video_llm_output.split(') ')[1]):  # noqa
            video_llm_pred = option_sent
    for option_ind in option_inds:
        if option_ind == video_llm_output or option_ind == video_llm_output.replace('.', ''):  # noqa
            video_llm_pred = option_ind

    if video_llm_pred is None:
        return "fail"
    else:
        # Correct if the aligned prediction equals the full answer, its index
        # part, its sentence part, or the bare letter of its index.
        return 1 if video_llm_pred == answer or video_llm_pred == answer.split(":")[0] or video_llm_pred == answer.split(": ")[1] or video_llm_pred == answer.split(": ")[0].split()[1] else 0  # noqa
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def eval_rule_multi_choice(line):
    """Rule-based scoring for multi-choice questions.

    Returns 1/0 when the prediction can be compared to the answer, or the
    string "fail" when no rule applies (caller may use an LLM judge).
    """
    prediction = line['prediction']
    answer = line['answer']

    if prediction == answer:
        return 1
    if prediction in ['A', 'B', 'C', 'D']:
        # Bare option letter: compare with the answer's leading letter.
        return 1 if prediction == answer[0] else 0
    if any(prediction.startswith(prefix) for prefix in ['A.', 'B.', 'C.', 'D.']):
        return 1 if prediction.split('.')[0] == answer[0] else 0
    if any(prediction.startswith(prefix) for prefix in ['A)', 'B)', 'C)', 'D)']):
        return 1 if prediction.split(')')[0] == answer[0] else 0
    return "fail"
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def eval_rule_YorN(video_llm_output):
    """Extract the yes/no prediction from the raw model output.

    Returns "yes" or "no" when the lowercased output starts with one of
    them, otherwise False to signal extraction failure.
    """
    lowered = video_llm_output.lower()
    if lowered.startswith("yes"):
        return "yes"
    if lowered.startswith("no"):
        return "no"
    return False
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def llm_output_to_rating(llm_output):
    """Map a judge response containing "Correct"/"Incorrect" to 1/0.

    Outputs lacking both markers are warned about and rated 0.
    """
    has_correct = 'Correct' in llm_output
    has_incorrect = 'Incorrect' in llm_output
    if not (has_correct or has_incorrect):
        print(f"Warning: LLM output is not in the correct format: {llm_output}")
        return 0
    # Prefix checks come first; note "Incorrect" contains "Correct", so the
    # substring fallback must require that "Incorrect" is absent.
    if llm_output.startswith('Correct'):
        return 1
    if llm_output.startswith('Incorrect'):
        return 0
    if has_correct and not has_incorrect:
        return 1
    return 0
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def parse_llm_output(llm_output, gt_answer):
    """Parse the judge's "Reasoning:/Answer:" response and rate it against
    *gt_answer*.

    Returns:
        dict with 'rating' (1/0, or -1 for invalid/empty judge output),
        'chatgpt-answer' and 'chatgpt-reasoning'.
    """
    if llm_output == "invalid_request_error" or not llm_output:
        return {"rating": -1, "chatgpt-answer": None, "chatgpt-reasoning": None}

    eval_result = {}
    for raw_line in llm_output.split("\n"):
        stripped = raw_line.strip()
        # Later occurrences overwrite earlier ones, mirroring a linear scan.
        if "Reasoning" in stripped:
            eval_result['chatgpt-reasoning'] = stripped.replace("Reasoning:", "").strip()
        if "Answer" in stripped:
            eval_result['chatgpt-answer'] = stripped.replace("Answer:", "").strip()

    # Fall back to the raw output when no "Answer:" line was found.
    eval_result.setdefault('chatgpt-answer', llm_output)
    eval_result.setdefault('chatgpt-reasoning', None)

    # Check if the chatgpt answer is the ground-truth answer: it must name
    # exactly one option prefix ("A.".."D.") and match the ground truth's
    # leading option token.
    prefix_hits = sum(
        eval_result['chatgpt-answer'].count(prefix)
        for prefix in ['A.', 'B.', 'C.', 'D.']
    )
    matches_gt = (
        eval_result['chatgpt-answer'].split(". ")[0] == gt_answer.split(". ")[0]
    )
    eval_result['rating'] = 1 if (matches_gt and prefix_hits == 1) else 0
    return eval_result
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def evaluate_tempcompass_mcq(model, line):
    """Score a TempCompass multi-choice / caption-matching prediction.

    Rule-based matching (per task type) is attempted first; when it returns
    the sentinel "fail" and a judge *model* is available, the judge LLM rates
    the prediction instead. With no judge, unmatched items score 0.
    Returns the eval-result dict with the same keys as before.
    """
    task = line['task_type']
    rule_fns = {
        'caption_matching': eval_rule_caption_matching,
        'multi-choice': eval_rule_multi_choice
    }
    judge_templates = {
        'multi-choice': '{}\nMulti-Choice Question:\n{}\nGround-Truth Answer: {}\nModel Prediction: {}',
        'caption_matching': '{}\nCaption Matching Question:\n{}\nGround-Truth Answer: {}\nModel Prediction: {}'
    }
    judge_system = {
        'multi-choice': system_prompt_multi_choice,
        'caption_matching': system_prompt_caption_matching
    }
    eval_result = {
        "question": line['question'],
        "answer": line['answer'],
        "prediction": line['prediction'],
        "task_type": task,
        "candidates": line['candidates'],
        "match_success": True
    }

    rating = rule_fns[task](line)
    if rating != "fail":
        # Rule matching succeeded — use its rating directly.
        eval_result['rating'] = rating
        return eval_result

    # Rule matching failed: fall back to the judge model when one is present.
    eval_result['match_success'] = False
    if model is None:
        eval_result['rating'] = 0
        return eval_result

    prompt = judge_templates[task].format(
        judge_system[task], line['question'], line['answer'], line['prediction'])  # noqa
    llm_output = model.generate(prompt)
    eval_result['chatgpt-response'] = llm_output
    eval_result['rating'] = llm_output_to_rating(llm_output)
    return eval_result
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
def evaluate_tempcompass_captioning(model, line):
    """Score a TempCompass captioning prediction via the judge model.

    The generated caption is shown to the judge together with a multi-choice
    question; the judge's reply is parsed by ``parse_llm_output``. A judge
    *model* is mandatory for this task — exact matching is not supported.

    Raises:
        ValueError: when *model* is None.
    """
    prompt = (
        f"{system_prompt_captioning}\n"
        f"Video Description:{line['prediction']}\n"
        f"Multi-Choice Question:\n{line['mc_question']}\n"
    )
    if model is None:
        raise ValueError("Model is None, TempCompass Captioning task not supported exact matching")  # noqa
    llm_output = model.generate(prompt)
    return parse_llm_output(llm_output, gt_answer=line['mc_answer'])
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
def evaluate_tempcompass_YorN(model, line):
    """Score a TempCompass yes/no prediction.

    ``eval_rule_YorN`` is tried first; a truthy return is compared against the
    ground-truth answer (NOTE(review): this assumes the rule returns a falsy
    value — not the string "fail" — when it cannot match; confirm against the
    rule helper). On rule failure, the judge *model* rates the prediction, or
    the item scores 0 when no judge is available.
    """
    prompt = (
        f"{system_prompt_YorN}\n"
        f"Yes/No Question:\n{line['question']}\n"
        f"Ground-Truth Answer: {line['answer']}\n"
        f"Model Prediction: {line['prediction']}"
    )
    eval_result = {
        "question": line['question'],
        "answer": line['answer'],
        "prediction": line['prediction'],
        "match_success": True
    }

    matched = eval_rule_YorN(line['prediction'])
    if matched:
        # Rule extracted an answer — exact comparison against ground truth.
        eval_result['rating'] = 1 if matched == line['answer'] else 0
        return eval_result

    eval_result['match_success'] = False
    if model is None:
        eval_result['rating'] = 0
        return eval_result

    # Rule failed but a judge is available — ask it to rate the prediction.
    llm_output = model.generate(prompt)
    eval_result['chatgpt-response'] = llm_output
    eval_result['rating'] = llm_output_to_rating(llm_output)
    return eval_result
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
def get_dimension_rating(score_file):
    """Aggregate scores from *score_file* per "<dim>. <task_type>" key.

    Returns a dict mapping each key to a two-element list
    ``[sum_of_scores, item_count]``.
    """
    # `load` is the project-level loader; the result is iterated as a
    # pandas DataFrame (iterrows).
    frame = load(score_file)
    totals = {}
    for _, row in frame.iterrows():
        key = row['dim'] + '. ' + row['task_type']
        bucket = totals.setdefault(key, [0, 0])
        bucket[0] += int(row['score'])
        bucket[1] += 1
    return totals
|
VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/eagle_x.cpython-310.pyc
ADDED
|
Binary file (5.09 kB). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/falcon_vlm.cpython-310.pyc
ADDED
|
Binary file (1.62 kB). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/llava_uhd_siglip2.cpython-310.pyc
ADDED
|
Binary file (7.14 kB). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/parrot.cpython-310.pyc
ADDED
|
Binary file (7.5 kB). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/visualglm.cpython-310.pyc
ADDED
|
Binary file (1.46 kB). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/vlaa_thinker.cpython-310.pyc
ADDED
|
Binary file (5.38 kB). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/wethink_vl.cpython-310.pyc
ADDED
|
Binary file (5.23 kB). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/vlm/hawk_vl/__init__.py
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .model import HawkVL
|
| 2 |
+
from .prompt import HawkVLPromptMixin
|
VLMEvalKit-sudoku/vlmeval/vlm/hawk_vl/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (236 Bytes). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/vlm/hawk_vl/__pycache__/model.cpython-310.pyc
ADDED
|
Binary file (4.62 kB). View file
|
|
|